diff --git a/.github/actions/install-pnl/action.yml b/.github/actions/install-pnl/action.yml index c4c019eda27..d5ec8de3949 100644 --- a/.github/actions/install-pnl/action.yml +++ b/.github/actions/install-pnl/action.yml @@ -23,6 +23,20 @@ runs: *) echo "Unsupported OS"; exit 1 ;; esac + - name: Setup Python venv + shell: bash + run: | + case "$RUNNER_OS" in + macOS* | Linux*) + export VIRTUAL_ENV='${{ runner.temp }}/_venv'; export PYTHON_LOC=$VIRTUAL_ENV/bin/ ;; + Windows*) + export VIRTUAL_ENV='${{ runner.temp }}\_venv'; export PYTHON_LOC=$VIRTUAL_ENV\\Scripts ;; + *) echo "Unsupported OS"; exit 1 ;; + esac + python -m venv $VIRTUAL_ENV + echo "VIRTUAL_ENV=$VIRTUAL_ENV" >> $GITHUB_ENV + echo "$PYTHON_LOC" >> $GITHUB_PATH + - name: Drop pytorch on x86 shell: bash run: | diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e162542f922..4d702e5d68a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,6 +16,7 @@ updates: include: "scope" labels: - "CI" + rebase-strategy: "disabled" - package-ecosystem: "pip" directory: "/" # use top dir @@ -27,3 +28,4 @@ updates: labels: - "deps" open-pull-requests-limit: 15 + rebase-strategy: "disabled" diff --git a/.github/workflows/compare-comment.yml b/.github/workflows/compare-comment.yml index 7c8cb9cd0cb..61bf6896a5d 100644 --- a/.github/workflows/compare-comment.yml +++ b/.github/workflows/compare-comment.yml @@ -11,15 +11,17 @@ jobs: runs-on: ubuntu-latest if: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' + permissions: + actions: read + pull-requests: write steps: - - name: 'Download docs artifacts' id: docs-artifacts - uses: actions/github-script@v4.1 + uses: actions/github-script@v5 with: script: | - var artifacts = await github.actions.listWorkflowRunArtifacts({ + var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ owner: context.repo.owner, repo: context.repo.repo, run_id: ${{ github.event.workflow_run.id }}, @@ -44,7 +46,7 @@ jobs: var fs = require('fs'); for (artifact of docsArtifacts) { console.log('Downloading: ' + artifact.name); - var download = await github.actions.downloadArtifact({ + var download = await github.rest.actions.downloadArtifact({ owner: context.repo.owner, repo: context.repo.repo, artifact_id: artifact.id, @@ -68,7 +70,7 @@ jobs: (diff -r docs-base docs-head && echo 'No differences!' 
|| true) | tee ./result.diff - name: Post comment with docs diff - uses: actions/github-script@v4.1 + uses: actions/github-script@v5 with: script: | var fs = require('fs'); @@ -77,7 +79,7 @@ jobs: console.log('Posting diff to PR: ' + issue_number); - github.issues.createComment({ + github.rest.issues.createComment({ issue_number: issue_number, owner: context.repo.owner, repo: context.repo.repo, diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index adbaf310d82..869c9dd94f5 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -40,14 +40,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 if: ${{ matrix.pnl-version == 'head' }} with: fetch-depth: 10 ref: ${{ github.ref }} - name: Checkout pull base - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 if: ${{ matrix.pnl-version == 'base' }} with: fetch-depth: 10 @@ -61,7 +61,7 @@ jobs: branch: master - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2.2.2 + uses: actions/setup-python@v2.3.1 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.python-architecture }} @@ -75,7 +75,7 @@ jobs: echo ::set-output name=pip_cache_dir::$(python -m pip cache dir) - name: Wheels cache - uses: actions/cache@v2.1.6 + uses: actions/cache@v2.1.7 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-v2-${{ github.sha }} @@ -90,14 +90,20 @@ jobs: - name: Add git tag # The generated docs include PNL version, # set it to a fixed value to prevent polluting the diff - if: ${{ github.event_name == 'pull_request' }} - run: git tag 'v999.999.999.999' + if: github.event_name == 'pull_request' + run: git tag --force 'v999.999.999.999' - name: Build Documentation run: make -C docs/ html -e SPHINXOPTS="-aE -j auto" + - name: Remove git tag + # The generated docs include PNL version, + # This was set to a fixed value to prevent polluting the diff + if: github.event_name == 'pull_request' && always() + run: git tag -d 'v999.999.999.999' + - name: Upload Documentation - uses: actions/upload-artifact@v2.2.4 + uses: actions/upload-artifact@v2.3.1 with: name: Documentation-${{matrix.pnl-version}}-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} retention-days: 1 @@ -109,7 +115,7 @@ jobs: - name: Upload PR number for other workflows if: ${{ github.event_name == 'pull_request' }} - uses: actions/upload-artifact@v2.2.4 + uses: actions/upload-artifact@v2.3.1 with: name: pr_number path: ./pr_number.txt @@ -123,7 +129,10 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} + permissions: + contents: write needs: [docs-build] + environment: github-pages if: github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || @@ -133,7 +142,7 @@ jobs: steps: - name: Checkout docs - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 with: ref: gh-pages diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index c9e7940e4cb..3f26414982b 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -10,9 +10,6 @@ on: tags-ignore: - 'v**' pull_request: - paths-ignore: - - 'docs/**' - - 'doc_requirements.txt' jobs: build: @@ -22,21 +19,34 @@ jobs: matrix: python-version: [3.7, 3.8, 3.9] python-architecture: ['x64'] + extra-args: [''] os: [ubuntu-latest, macos-latest, 
windows-latest] include: + # 3.7 is broken on macos-11, https://github.com/actions/virtual-environments/issues/4230 + - python-version: 3.7 + python-architecture: 'x64' + os: macos-10.15 # add 32-bit build on windows - python-version: 3.8 python-architecture: 'x86' os: windows-latest + # code-coverage build on macos python 3.9 + - python-version: 3.9 + os: macos-latest + extra-args: '--cov=psyneulink' + exclude: + # 3.7 is broken on macos-11, https://github.com/actions/virtual-environments/issues/4230 + - python-version: 3.7 + os: macos-latest steps: - name: Checkout sources - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 with: fetch-depth: 10 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2.2.2 + uses: actions/setup-python@v2.3.1 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.python-architecture }} @@ -50,7 +60,7 @@ jobs: echo ::set-output name=pip_cache_dir::$(python -m pip cache dir) - name: Wheels cache - uses: actions/cache@v2.1.6 + uses: actions/cache@v2.1.7 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-v2-${{ github.sha }} @@ -71,24 +81,37 @@ jobs: flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - name: Test with pytest - timeout-minutes: 80 - run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto --maxprocesses=2 + timeout-minutes: 180 + run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto ${{ matrix.extra-args }} - name: Upload test results - uses: actions/upload-artifact@v2.2.4 + uses: actions/upload-artifact@v2.3.1 with: - name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} + name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.extra-args }} path: tests_out.xml retention-days: 5 if: success() || failure() + - name: Upload coveralls code coverage + if: contains(matrix.extra-args, '--cov=psyneulink') + shell: bash + env: + COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} + run: | + if [ -n "$COVERALLS_REPO_TOKEN" ]; then + pip install coveralls + coveralls + else + echo "::warning::Not uploading to coveralls.io, token not available!" 
+ fi + - name: Build dist run: | pip install setuptools wheel python setup.py sdist bdist_wheel - name: Upload dist packages - uses: actions/upload-artifact@v2.2.4 + uses: actions/upload-artifact@v2.3.1 with: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: dist/ diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml deleted file mode 100644 index 2d9e5c569a2..00000000000 --- a/.github/workflows/prepare-release.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Prepare PNL release - -on: - push: - tags: - - 'v*' - -jobs: - prepare-release: - runs-on: ubuntu-latest - steps: - - - name: Checkout sources - uses: actions/checkout@v2 - with: - fetch-depth: 1 - - - name: Check if on master - id: on_master - uses: ./.github/actions/on-branch - with: - branch: master - - - name: Check for existing release with the reference tag - uses: actions/github-script@v4.1 - id: exist_check - with: - script: | - tag = context.ref.split('/').pop() - console.log('running on:' + context.ref); - console.log('Looking for release for tag:' + tag); - try { - release_if_exists = await github.repos.getReleaseByTag({ - owner: context.repo.owner, - repo: context.repo.repo, - tag: tag - }); - console.log('Release found at: ' + release_if_exists.data.html_url); - core.setOutput('exists', 'yes') - } catch (err) { - if (err.status == 404) { - console.log('Release not found.'); - core.setOutput('exists', 'no') - } else { - throw err; - } - } - - - name: Create Release - uses: actions/github-script@v4.1 - if: steps.on_master.outputs.on-branch == 'master' && steps.exist_check.outputs.exists == 'no' - with: - # We need custom token since the default one doesn't trigger actions - github-token: ${{ secrets.CREATE_RELEASE_TOKEN }} - script: | - if (core.getInput('github-token') == 'no-token') { - core.warning('No token to create a release!'); - return 0; - } - - tag = context.ref.split('/').pop() - return await github.repos.createRelease({ - owner: context.repo.owner, - repo: context.repo.repo, - tag_name: tag, - prerelease: true, - name: 'Release ' + tag, - body: 'New features and fixed bugs' - }); diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index c3e675219ce..5be1d382304 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -1,8 +1,9 @@ -name: Test PNL pre-release +name: Test and publish PNL release on: - release: - types: [published] + push: + tags: + - 'v*' jobs: create-python-dist: @@ -11,31 +12,23 @@ jobs: matrix: # Python version in matrix for easier reference python-version: [3.8] - if: ${{ github.event.release.prerelease == true }} + environment: test-pypi outputs: sdist: ${{ steps.create_dist.outputs.sdist }} wheel: ${{ steps.create_dist.outputs.wheel }} steps: + - name: Checkout sources + uses: actions/checkout@v2.4.0 + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2.2.2 + uses: actions/setup-python@v2.3.1 with: python-version: ${{ matrix.python-version }} - - name: Get release tarball - id: get_release - shell: bash - run: | - wget ${{ github.event.release.tarball_url }} -O psyneulink.tar.gz - export RELEASE_DIR=$(tar -tzf psyneulink.tar.gz | head -n1) - echo ::set-output name=release_dir::$RELEASE_DIR - tar -xzvf psyneulink.tar.gz - - - name: Create Python Dist files id: create_dist shell: bash run: | - cd ${{ steps.get_release.outputs.release_dir }} # We don't care about the python version used. 
pip install setuptools wheel python setup.py sdist @@ -45,11 +38,11 @@ jobs: echo ::set-output name=wheel::$(ls *.whl) - name: Upload Python dist files - uses: actions/upload-artifact@v2.2.4 + uses: actions/upload-artifact@v2.3.1 with: name: Python-dist-files + path: dist/ retention-days: 1 - path: ${{ steps.get_release.outputs.release_dir }}/dist - name: Upload dist files to test PyPI shell: bash @@ -57,9 +50,9 @@ jobs: # Include implicit dependency on setuptools{,-rust} and preinstall wheel pip install setuptools setuptools-rust wheel pip install twine - # This expects TWINE_USERNAME, TWINE_PASSWORD, and TWINE_REPOSITORY + # This expects TWINE_USERNAME, TWINE_PASSWORD, and TWINE_REPOSITORY_URL # environment variables - # It's not possibel to condition steps on env or secrets, + # It's not possible to condition steps on env or secrets, # We need an explicit check here if [ -n "$TWINE_USERNAME" -a -n "$TWINE_PASSWORD" ]; then twine upload dist/* @@ -69,7 +62,7 @@ jobs: env: TWINE_USERNAME: ${{ secrets.TWINE_TEST_USERNAME }} TWINE_PASSWORD: ${{ secrets.TWINE_TEST_PASSWORD }} - TWINE_REPOSITORY: ${{ secrets.TWINE_TEST_REPOSITORY }} + TWINE_REPOSITORY_URL: ${{ secrets.TWINE_TEST_REPOSITORY_URL }} test-release: @@ -82,19 +75,8 @@ jobs: runs-on: ${{ matrix.os }} needs: [create-python-dist] - if: ${{ github.event.release.prerelease == true }} steps: - - - name: Get release tarball - id: get_release - shell: bash - run: | - curl -L --retry 5 ${{ github.event.release.tarball_url }} --output psyneulink.tar.gz - export RELEASE_DIR=$(tar -tzf psyneulink.tar.gz | head -n1) - echo ::set-output name=release_dir::$RELEASE_DIR - tar -xzvf psyneulink.tar.gz - - name: Download dist files uses: actions/download-artifact@v2 with: @@ -102,12 +84,12 @@ jobs: path: dist/ - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2.2.2 + uses: actions/setup-python@v2.3.1 with: python-version: ${{ matrix.python-version }} # The installation _could_ reuse the 'install-pnl' action, - # but we intentionally avoid workarounds in there. + # but actions deploys workarounds that we want to avoid here. - name: MacOS dependencies run: HOMEBREW_NO_AUTO_UPDATE=1 brew install graphviz if: startsWith(runner.os, 'macOS') @@ -130,27 +112,32 @@ jobs: if: matrix.dist == 'sdist' run: pip install dist/${{ needs.create-python-dist.outputs.sdist }}[dev] + - name: Get tests from the repository + uses: actions/checkout@v2.4.0 + - name: Run tests + shell: bash # run only tests/. 
We don't care about codestyle/docstyle at this point timeout-minutes: 80 run: | - # Enter the PNL directory otherwise docstyle won't pick up the configuration - cd ${{ steps.get_release.outputs.release_dir }} - pytest --junit-xml=tests_out.xml --verbosity=0 -n auto --maxprocesses=2 tests + # remove sources to prevent conflict with the isntalled package + rm -r -f psyneulink/ docs/ bin/ Matlab/ + # run tests + pytest --junit-xml=tests_out.xml --verbosity=0 -n auto tests - name: Upload test results - uses: actions/upload-artifact@v2.2.4 + uses: actions/upload-artifact@v2.3.1 with: name: test-results-${{ matrix.os }}-${{ matrix.python-version }} - path: ${{ steps.get_release.outputs.release_dir }}/tests_out.xml + path: tests_out.xml retention-days: 30 if: success() || failure() - publish-release: + publish-pypi: runs-on: ubuntu-latest needs: [create-python-dist, test-release] - if: ${{ github.event.release.prerelease == true }} + environment: pypi steps: - name: Download dist files @@ -165,9 +152,9 @@ jobs: # Include implicit dependency on setuptools{,-rust} and preinstall wheel pip3 install --user setuptools setuptools-rust wheel pip3 install --user twine - # This expects TWINE_USERNAME, TWINE_PASSWORD, and TWINE_REPOSITORY + # This expects TWINE_USERNAME, TWINE_PASSWORD, and TWINE_REPOSITORY_URL # environment variables - # It's not possibel to condition steps on env or secrets, + # It's not possible to condition steps on env or secrets, # We need an explicit check here if [ -n "$TWINE_USERNAME" -a -n "$TWINE_PASSWORD" ]; then twine upload dist/* @@ -177,14 +164,55 @@ jobs: env: TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} - TWINE_REPOSITORY: ${{ secrets.TWINE_REPOSITORY }} + TWINE_REPOSITORY_URL: ${{ secrets.TWINE_REPOSITORY_URL }} + + publish-github: + runs-on: ubuntu-latest + needs: [create-python-dist, test-release] + environment: gh-release + permissions: + contents: write + + steps: + - name: Download dist files + uses: actions/download-artifact@v2 + with: + name: Python-dist-files + path: dist/ - name: Upload dist files to release - uses: actions/github-script@v4.1 + uses: actions/github-script@v5 with: script: | const fs = require('fs') + tag = context.ref.split('/').pop() + console.log('running on:' + context.ref); + console.log('Looking for release for tag:' + tag); + + var release + try { + release = await github.rest.repos.getReleaseByTag({ + owner: context.repo.owner, + repo: context.repo.repo, + tag: tag + }); + console.log('Release found at: ' + release.data.html_url); // ' + } catch (err) { + if (err.status == 404) { + console.log('Release not found, creating a new one'); + release = await github.rest.repos.createRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + tag_name: tag, + name: 'Release ' + tag, + body: 'New features and fixed bugs' + }); + } else { + throw err; + } + } + console.log('Using release upload url: ' + release['data']['upload_url']); // Determine content-length for header to upload asset for (asset of ['${{ needs.create-python-dist.outputs.wheel }}', '${{ needs.create-python-dist.outputs.sdist }}']) { const file_path = 'dist/' + asset; @@ -196,18 +224,10 @@ jobs: const headers = { 'content-type': 'application/zip', 'content-length': file_size(file_path) }; // Upload a release asset - const uploadAssetResponse = await github.repos.uploadReleaseAsset({ - url: '${{ github.event.release.upload_url }}', + const uploadAssetResponse = await github.rest.repos.uploadReleaseAsset({ + url: 
release.data.upload_url, headers, name: asset, file: fs.readFileSync(file_path) }); } - - // Bump to full release - const uploadAssetResponse = await github.repos.updateRelease({ - owner: context.repo.owner, - repo: context.repo.repo, - release_id: ${{ github.event.release.id }}, - prerelease: false - }); diff --git a/README.rst b/README.rst index 695684b7c8a..bbe1d33cbc4 100644 --- a/README.rst +++ b/README.rst @@ -95,11 +95,15 @@ characteristics that are often (at least in the initial stages of development) i interpreted vs. compiled). That said, priorities for ongoing development of PsyNeuLink are: - i) acceleration, using just-in-time compilation methods and parallelization; + i) acceleration, using just-in-time compilation methods and parallelization + (see `Compilation`, and `Vesely et al., 2022 `_); ii) enhancement of the API to facilitate wrapping modules from other packages for integration into the PsyNeuLink - environment (examples currently exist for Pytorch and Emergent); - iii) integration of tools for parameter estimation, model comparison and data fitting; and - iv) a graphic interface for the construction of models and realtime display of their execution. + environment (examples currently exist for `Pytorch `_ ) and translating into a standard + `Model Description Format (MDF) `_; + iii) integration of tools for parameter estimation, model comparison and data fitting + (see `ParameterEstimationComposition`); and + iv) a graphic interface for the construction of models and realtime display of their execution + (see `PsyNeuLinkView `_). Environment Overview -------------------- @@ -196,42 +200,45 @@ Contributors *(in alphabetical order)* -* **Allie Burton**, Princeton Neuroscience Institute, Princeton University +* **Allie Burton**, Princeton Neuroscience Institute, Princeton University (formerly) * **Laura Bustamante**, Princeton Neuroscience Institute, Princeton University * **Jonathan D. 
Cohen**, Princeton Neuroscience Institute, Princeton University -* **Samyak Gupta**, Department of Computer Science, Rutgers University +* **Samyak Gupta**, Department of Computer Science, Princeton University * **Abigail Hoskin**, Department of Psychology, Princeton University -* **Peter Johnson**, Princeton Neuroscience Institute, Princeton University +* **Peter Johnson**, Princeton Neuroscience Institute, Princeton University (formerly) * **Justin Junge**, Department of Psychology, Princeton University * **Qihong Lu**, Department of Psychology, Princeton University -* **Kristen Manning**, Princeton Neuroscience Institute, Princeton University +* **Kristen Manning**, Princeton Neuroscience Institute, Princeton University (formerly) * **Katherine Mantel**, Princeton Neuroscience Institute, Princeton University * **Lena Rosendahl**, Department of Mechanical and Aerospace Engineering, Princeton University -* **Dillon Smith**, Princeton Neuroscience Institute, Princeton University -* **Markus Spitzer**, Princeton Neuroscience Institute, Princeton University +* **Dillon Smith**, Princeton Neuroscience Institute, Princeton University (formerly) +* **Markus Spitzer**, Princeton Neuroscience Institute, Princeton University (formerly) * **David Turner**, Princeton Neuroscience Institute, Princeton University -* **Jan Vesely**, Department of Computer Science, Rutgers University -* **Changyan Wang**, Princeton Neuroscience Institute, Princeton University -* **Nate Wilson**, Princeton Neuroscience Institute, Princeton University +* **Jan Vesely**, Department of Computer Science, Rutgers University (formerly) +* **Changyan Wang**, Princeton Neuroscience Institute, Princeton University (formerly) +* **Nate Wilson**, Princeton Neuroscience Institute, Princeton University (formerly) With substantial and greatly appreciated assistance from: -* **Abhishek Bhattacharjee**, Department of Computer Science, Rutgers University +* **Abhishek Bhattacharjee**, Department of Computer Science, Yale University * **Mihai Capota**, Intel Labs, Intel Corporation * **Bryn Keller**, Intel Labs, Intel Corporation -* **Susan Liu**, Princeton Neuroscience Institute, Princeton University +* **Susan Liu**, Princeton Neuroscience Institute, Princeton University (formerly) * **Garrett McGrath**, Princeton Neuroscience Institute, Princeton University -* **Sebastian Musslick**, Princeton Neuroscience Institute, Princeton University +* **Sebastian Musslick**, Princeton Neuroscience Institute, Princeton University (formerly) * **Amitai Shenhav**, Cognitive, Linguistic, & Psychological Sciences, Brown University -* **Michael Shvartsman**, Princeton Neuroscience Institute, Princeton University +* **Michael Shvartsman**, Princeton Neuroscience Institute, Princeton University (formerly) * **Ben Singer**, Princeton Neuroscience Institute, Princeton University -* **Ted Willke**, Intel Labs, Intel Corporation +* **Ted Willke**, Brain Inspired Computing Lab, Intel Corporation -Support for the development of PsyNeuLink has been provided by: +Support +------- + +The development of PsyNeuLink has benefited by generous support from the following agencies: -* The National Institute of Mental Health (R21-MH117548) -* The John Templeton Foundation -* The Templeton World Charitable Foundation +* `The National Institute of Mental Health (R21-MH117548) `_ +* `The John Templeton Foundation `_ +* `The Templeton World Charitable Foundation `_ License ------- diff --git a/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py 
b/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py index ed5485d04b7..ca0d4ebf933 100644 --- a/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py +++ b/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py @@ -8,7 +8,6 @@ #%% # import os -import time import numpy as np import psyneulink as pnl @@ -205,7 +204,7 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): inp_task.input_port, reward.input_port, punish.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.1), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.1), objective_mechanism=objective_mech, function=pnl.GridSearch(), control_signals=[driftrate_control_signal, diff --git a/Scripts/Debug/Predator-Prey Sebastian REDUCED.py b/Scripts/Debug/Predator-Prey Sebastian REDUCED.py index 15ec7e4928b..c62a30a4078 100644 --- a/Scripts/Debug/Predator-Prey Sebastian REDUCED.py +++ b/Scripts/Debug/Predator-Prey Sebastian REDUCED.py @@ -62,7 +62,7 @@ def get_new_episode_flag(): ocm = OptimizationControlMechanism(name='EVC', state_features=[trial_type_input_mech], - # state_feature_function=FEATURE_FUNCTION, + # state_feature_functions=FEATURE_FUNCTION, agent_rep=RegressionCFA( name='RegressionCFA', update_weights=BayesGLM(mu_0=0.5, sigma_0=0.1), diff --git a/Scripts/Debug/Predator-Prey Sebastian.py b/Scripts/Debug/Predator-Prey Sebastian.py index 2ea0ef82e34..b332f104e11 100644 --- a/Scripts/Debug/Predator-Prey Sebastian.py +++ b/Scripts/Debug/Predator-Prey Sebastian.py @@ -167,7 +167,7 @@ def get_action(variable=[[0,0],[0,0],[0,0]]): ocm = OptimizationControlMechanism(name='EVC', state_features=[trial_type_input_mech], - # state_feature_function=FEATURE_FUNCTION, + # state_feature_functions=FEATURE_FUNCTION, agent_rep=RegressionCFA( name='RegressionCFA', update_weights=BayesGLM(mu_0=0.5, sigma_0=0.1), @@ -292,7 +292,7 @@ def print_controller(): print(f'OUTER LOOP AGENT ACTION:{agent_action}') if VERBOSE >= STANDARD_REPORTING: - if agent_comp.controller_mode is BEFORE: + if agent_comp.controller_mode == BEFORE: print_controller() print(f'\nObservations:' f'\n\tPlayer:\n\t\tveridical: {player_percept.parameters.variable.get(context)}' @@ -304,7 +304,7 @@ def print_controller(): f'\n\nActions:\n\tAgent: {agent_action}\n\tOptimal: {optimal_action}' f'\n\nOutcome:\n\t{ocm.objective_mechanism.parameters.value.get(context)}' ) - if agent_comp.controller_mode is AFTER: + if agent_comp.controller_mode == AFTER: print_controller() # Restore frame buffer to state after optimal action taken (at beginning of trial) diff --git a/Scripts/Debug/Umemoto_Feb.py b/Scripts/Debug/Umemoto_Feb.py index e289c3225db..a8ba6f4b11b 100644 --- a/Scripts/Debug/Umemoto_Feb.py +++ b/Scripts/Debug/Umemoto_Feb.py @@ -120,7 +120,7 @@ Umemoto_comp.add_model_based_optimizer(optimizer=pnl.OptimizationControlMechanism(agent_rep=Umemoto_comp, state_features=[Target_Stim.input_port, Distractor_Stim.input_port, Reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=1.0), + state_feature_functions=pnl.AdaptiveIntegrator(rate=1.0), objective_mechanism=pnl.ObjectiveMechanism(monitor_for_control=[Reward, (Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD], 1, -1)], ), diff --git a/Scripts/Debug/Umemoto_Feb2.py b/Scripts/Debug/Umemoto_Feb2.py index 2815e4fbb2d..1b75225342e 100644 --- a/Scripts/Debug/Umemoto_Feb2.py +++ b/Scripts/Debug/Umemoto_Feb2.py @@ -132,7 +132,7 @@ state_features=[Target_Stim.input_port, Distractor_Stim.input_port, Reward.input_port], - 
state_feature_function=pnl.AdaptiveIntegrator(rate=1.0), + state_feature_functions=pnl.AdaptiveIntegrator(rate=1.0), objective_mechanism=pnl.ObjectiveMechanism( monitor_for_control=[Reward, (Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD], 1, -1)], diff --git a/Scripts/Debug/Yotam LCA Model LLVM.py b/Scripts/Debug/Yotam LCA Model LLVM.py index f404a3d5f7f..e2226fe9d5b 100644 --- a/Scripts/Debug/Yotam LCA Model LLVM.py +++ b/Scripts/Debug/Yotam LCA Model LLVM.py @@ -12,9 +12,7 @@ import networkx as nx import os import sys -import warnings import time -#warnings.filterwarnings("error", category=UserWarning) ###################### Convenience functions for testing script ################################# diff --git a/Scripts/Debug/new_umemoto.py b/Scripts/Debug/new_umemoto.py index e10028ee6ae..621b0b3744a 100644 --- a/Scripts/Debug/new_umemoto.py +++ b/Scripts/Debug/new_umemoto.py @@ -114,7 +114,7 @@ Umemoto_comp.add_model_based_optimizer(optimizer=pnl.OptimizationControlMechanism(agent_rep=Umemoto_comp, state_features={pnl.SHADOW_EXTERNAL_INPUTS: [Target_Stim, Distractor_Stim, Reward]}, - state_feature_function=pnl.AdaptiveIntegrator(rate=1.0), + state_feature_functions=pnl.AdaptiveIntegrator(rate=1.0), objective_mechanism=pnl.ObjectiveMechanism(monitor_for_control=[Reward, (Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD], 1, -1)], ), diff --git a/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py b/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py index f1f4e80bcf1..0eb89fb2ed7 100644 --- a/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py +++ b/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py @@ -195,7 +195,7 @@ def get_action(variable=[[0, 0], [0, 0], [0, 0]]): # ************************************** CONOTROL APPARATUS *********************************************************** self.ocm = OptimizationControlMechanism(name='EVC', state_features=[self.prey_pred_trial_input_mech, self.single_prey_trial_input_mech, self.double_prey_trial_input_mech], - # state_feature_function=FEATURE_FUNCTION, + # state_feature_functions=FEATURE_FUNCTION, agent_rep=RegressionCFA( update_weights=BayesGLM(mu_0=-0.0, sigma_0=0.0001), prediction_terms=[PV.F, PV.C, PV.COST] diff --git a/Scripts/Debug/stability_flexibility_simple.py b/Scripts/Debug/stability_flexibility_simple.py index f16443aafc0..b2bf6b133c1 100644 --- a/Scripts/Debug/stability_flexibility_simple.py +++ b/Scripts/Debug/stability_flexibility_simple.py @@ -164,7 +164,7 @@ def computeAccuracy(variable): meta_controller = pnl.OptimizationControlMechanism(agent_rep=stabilityFlexibility, state_features=[inputLayer.input_port, stimulusInfo.input_port], - state_feature_function=pnl.Buffer(history=100), + state_feature_functions=pnl.Buffer(history=100), objective_mechanism=objective_mech, function=pnl.GridSearch(), control_signals=[signal]) diff --git a/Scripts/Examples/EVC OCM.py b/Scripts/Examples/EVC OCM.py index 6216f00dc2a..362ac5e07db 100644 --- a/Scripts/Examples/EVC OCM.py +++ b/Scripts/Examples/EVC OCM.py @@ -32,7 +32,7 @@ comp.add_model_based_optimizer(optimizer=OptimizationControlMechanism(name='OCM', agent_rep=comp, state_features=[Input.input_port, reward.input_port], - state_feature_function=AdaptiveIntegrator(rate=0.5), + state_feature_functions=AdaptiveIntegrator(rate=0.5), objective_mechanism=ObjectiveMechanism( name='OCM Objective Mechanism', function=LinearCombination(operation=PRODUCT), diff --git a/Scripts/Examples/EVC-Gratton Composition.py b/Scripts/Examples/EVC-Gratton Composition.py index 
7d5a35f610e..071ae433155 100644 --- a/Scripts/Examples/EVC-Gratton Composition.py +++ b/Scripts/Examples/EVC-Gratton Composition.py @@ -65,7 +65,7 @@ state_features=[target_stim.input_port, flanker_stim.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator( + state_feature_functions=pnl.AdaptiveIntegrator( rate=1.0), objective_mechanism=objective_mech, function=pnl.GridSearch(), diff --git a/Scripts/Examples/EVC-Gratton-GaussianProcess.py b/Scripts/Examples/EVC-Gratton-GaussianProcess.py index 32cd454e6ea..5148e22d8a9 100644 --- a/Scripts/Examples/EVC-Gratton-GaussianProcess.py +++ b/Scripts/Examples/EVC-Gratton-GaussianProcess.py @@ -52,7 +52,7 @@ comp.add_linear_processing_pathway(task_execution_pathway) ocm = pnl.OptimizationControlMechanism(state_features=[Input, Reward], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), agent_rep=comp, # function=pnl.GaussianProcessOptimization, function=pnl.GridSearch, diff --git a/Scripts/Examples/StabilityFlexibility.py b/Scripts/Examples/StabilityFlexibility.py index 1b5c5a1b254..2a5f3b2c23f 100644 --- a/Scripts/Examples/StabilityFlexibility.py +++ b/Scripts/Examples/StabilityFlexibility.py @@ -177,7 +177,7 @@ def computeAccuracy(variable): meta_controller = pnl.OptimizationControlMechanism(agent_rep=stabilityFlexibility, state_features=[inputLayer.input_port, stimulusInfo.input_port], - state_feature_function=pnl.Buffer(history=3), + state_feature_functions=pnl.Buffer(history=3), objective_mechanism=objective_mech, function=pnl.GridSearch(), control_signals=[signal]) diff --git a/Scripts/Examples/Tutorial/Stroop Model - EVC.py b/Scripts/Examples/Tutorial/Stroop Model - EVC.py index ef20720c04f..1db6570ad1d 100644 --- a/Scripts/Examples/Tutorial/Stroop Model - EVC.py +++ b/Scripts/Examples/Tutorial/Stroop Model - EVC.py @@ -64,8 +64,8 @@ evc = OptimizationControlMechanism(name='EVC', agent_rep=Stroop_model, state_features=[color_input.input_port, word_input.input_port, reward.input_port], - state_feature_function=AdaptiveIntegrator(rate=1.0), - # state_feature_function=AdaptiveIntegrator, + state_feature_functions=AdaptiveIntegrator(rate=1.0), + # state_feature_functions=AdaptiveIntegrator, objective_mechanism= \ ObjectiveMechanism( name='EVC Objective Mechanism', diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py index 73d09f8a431..db888ec5ebf 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py +++ b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py @@ -181,7 +181,7 @@ def print_weights(): print('ControlSignal variables: ', [sig.parameters.variable.get(i) for sig in lvoc.control_signals]) print('ControlSignal values: ', [sig.parameters.value.get(i) for sig in lvoc.control_signals]) # print('state_features: ', lvoc.get_feature_values(context=c)) - print('lvoc: ', lvoc.evaluation_function([sig.parameters.variable.get(i) for sig in lvoc.control_signals], context=i)) + print('lvoc: ', lvoc.evaluate_agent_rep([sig.parameters.variable.get(i) for sig in lvoc.control_signals], context=i)) # print('time: ', duration) print('--------------------') @@ -222,6 +222,6 @@ def print_weights(): # print('ControlSignal variables: ', [sig.parameters.variable.get(c) for sig in lvoc.control_signals]) # print('ControlSignal values: ', [sig.parameters.value.get(c) for sig in lvoc.control_signals]) # # 
print('state_features: ', lvoc.get_feature_values(context=c)) -# print('lvoc: ', lvoc.evaluation_function([sig.parameters.variable.get(c) for sig in lvoc.control_signals], context=c)) +# print('lvoc: ', lvoc.evaluate_agent_rep([sig.parameters.variable.get(c) for sig in lvoc.control_signals], context=c)) # print('time: ', duration) # print('--------------------') diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py index 2a1fd9718b8..7d5cea9a171 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py +++ b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py @@ -232,7 +232,7 @@ def print_weights(): print('ControlSignal variables: ', [sig.parameters.variable.get(i) for sig in lvoc.control_signals]) print('ControlSignal values: ', [sig.parameters.value.get(i) for sig in lvoc.control_signals]) # print('state_features: ', lvoc.state_feature_values) - # print('lvoc: ', lvoc.evaluation_function([sig.parameters.variable.get(i) for sig in lvoc.control_signals], context=i)) + # print('lvoc: ', lvoc.evaluate_agent_rep([sig.parameters.variable.get(i) for sig in lvoc.control_signals], context=i)) # print('time: ', duration) print('--------------------') diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py b/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py index 659f3b19d37..7a30b14d0bd 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py @@ -271,7 +271,7 @@ def print_controller(): print(f'OUTER LOOP AGENT ACTION:{agent_action}') if VERBOSE >= STANDARD_REPORTING: - if agent_comp.controller_mode is BEFORE: + if agent_comp.controller_mode == BEFORE: print_controller() print(f'\nObservations:' f'\n\tPlayer:\n\t\tveridical: {player_percept.parameters.variable.get(context)}' @@ -283,7 +283,7 @@ def print_controller(): f'\n\nActions:\n\tAgent: {agent_action}\n\tOptimal: {optimal_action}' f'\n\nOutcome:\n\t{ocm.objective_mechanism.parameters.value.get(context)}' ) - if agent_comp.controller_mode is AFTER: + if agent_comp.controller_mode == AFTER: print_controller() # Restore frame buffer to state after optimal action taken (at beginning of trial) @@ -292,9 +292,9 @@ def print_controller(): # # The following allows accumulation of agent's errors (assumes simulations are run before actual action) # ddqn_agent.buffer.buffer = actual_agent_frame_buffer - if ACTION is OPTIMAL_ACTION: + if ACTION == OPTIMAL_ACTION: action = optimal_action - elif ACTION is AGENT_ACTION: + elif ACTION == AGENT_ACTION: action = agent_action else: assert False, "Must choose either OPTIMAL_ACTION or AGENT_ACTION" diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py b/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py index f0920261cd0..f6525841a6d 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py @@ -161,7 +161,7 @@ def get_action(variable=[[0,0],[0,0],[0,0]]): ocm = OptimizationControlMechanism(name='EVC', state_features=trial_type_input_mech, - state_feature_function=FEATURE_FUNCTION, + state_feature_functions=FEATURE_FUNCTION, agent_rep=RegressionCFA( update_weights=BayesGLM(mu_0=0.5, sigma_0=0.1), prediction_terms=[PV.F, PV.C, PV.COST] @@ -278,7 +278,7 @@ def print_controller(): print(f'OUTER LOOP AGENT 
ACTION:{agent_action}') if VERBOSE >= STANDARD_REPORTING: - if agent_comp.controller_mode is BEFORE: + if agent_comp.controller_mode == BEFORE: print_controller() print(f'\nObservations:' f'\n\tPlayer:\n\t\tveridical: {player_percept.parameters.variable.get(context)}' @@ -290,7 +290,7 @@ def print_controller(): f'\n\nActions:\n\tAgent: {agent_action}\n\tOptimal: {optimal_action}' f'\n\nOutcome:\n\t{ocm.objective_mechanism.parameters.value.get(context)}' ) - if agent_comp.controller_mode is AFTER: + if agent_comp.controller_mode == AFTER: print_controller() # Restore frame buffer to state after optimal action taken (at beginning of trial) diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN.py b/Scripts/Models (Under Development)/Predator-Prey Model DQN.py index 73cea5710bc..3354bd89e72 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN.py @@ -262,7 +262,7 @@ def print_controller(): print(f'OUTER LOOP AGENT ACTION:{agent_action}') if VERBOSE >= STANDARD_REPORTING: - if agent_comp.controller_mode is BEFORE: + if agent_comp.controller_mode == BEFORE: print_controller() print(f'\nObservations:' f'\n\tPlayer:\n\t\tveridical: {player_percept.parameters.variable.get(context)}' @@ -274,7 +274,7 @@ def print_controller(): f'\n\nActions:\n\tAgent: {agent_action}\n\tOptimal: {optimal_action}' f'\n\nOutcome:\n\t{ocm.objective_mechanism.parameters.value.get(context)}' ) - if agent_comp.controller_mode is AFTER: + if agent_comp.controller_mode == AFTER: print_controller() # Restore frame buffer to state after optimal action taken (at beginning of trial) @@ -283,9 +283,9 @@ def print_controller(): # # The following allows accumulation of agent's errors (assumes simulations are run before actual action) # ddqn_agent.buffer.buffer = actual_agent_frame_buffer - if ACTION is OPTIMAL_ACTION: + if ACTION == OPTIMAL_ACTION: action = optimal_action - elif ACTION is AGENT_ACTION: + elif ACTION == AGENT_ACTION: action = agent_action else: assert False, "Must choose either OPTIMAL_ACTION or AGENT_ACTION" diff --git a/conftest.py b/conftest.py index c80f45d3e56..92c98215b47 100644 --- a/conftest.py +++ b/conftest.py @@ -39,10 +39,10 @@ def pytest_runtest_setup(item): pytest.skip('{0} tests not requested'.format(m)) if 'cuda' in item.keywords and not pnlvm.ptx_enabled: - pytest.skip('PTX engine not enabled/available') + pytest.skip('PTX engine not enabled/available') if 'pytorch' in item.keywords and not pytorch_available: - pytest.skip('pytorch not available') + pytest.skip('pytorch not available') doctest.ELLIPSIS_MARKER = "[...]" @@ -118,7 +118,7 @@ def get_func_execution(func, func_mode): elif func_mode == 'Python': return func.function else: - assert False, "Unknown function mode: {}".format(mech_mode) + assert False, "Unknown function mode: {}".format(func_mode) @pytest.helpers.register def get_mech_execution(mech, mech_mode): diff --git a/dev_requirements.txt b/dev_requirements.txt index de4c63f74c6..9dc8d8afd90 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,9 +1,9 @@ jupyter<=1.0.0 pytest<6.2.6 pytest-benchmark<3.4.2 -pytest-cov<2.12.2 +pytest-cov<3.0.1 pytest-helpers-namespace<2021.4.30 pytest-profiling<=1.7.0 pytest-pycodestyle<=2.2.0 pytest-pydocstyle<=2.2.0 -pytest-xdist<2.4.0 +pytest-xdist<2.6.0 diff --git a/doc_requirements.txt b/doc_requirements.txt index b28dc8a2cad..1e90f009848 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,3 +1,3 @@ 
-psyneulink-sphinx-theme<1.2.2.2 -sphinx<4.1.3 +psyneulink-sphinx-theme<1.2.3.1 +sphinx<4.2.1 sphinx_autodoc_typehints<1.13.0 diff --git a/docs/source/BasicsAndPrimer.rst b/docs/source/BasicsAndPrimer.rst index 213bd914d6a..0540b1fbc42 100644 --- a/docs/source/BasicsAndPrimer.rst +++ b/docs/source/BasicsAndPrimer.rst @@ -720,7 +720,7 @@ Modulable Parameters ^^^^^^^^^^^^^^^^^^^^ Some parameters of Components can be modulable, meaning they can be modified by another Component (specifically, a `ModulatorySignal ` belonging to a `ModulatoryMechanism `). If the parameter -of a `Mechanism ` or a `Projection ` is modulable, it is assigned a `ParameterPort` -- this is a +of a `Mechanism ` or a `Projection ` is modulable, it may be assigned a `ParameterPort` -- this is a Component that belongs to the Mechanism or Projection and can receive a Projection from a ModulatorySignal, allowing another component to modulate the value of the parameter. ParameterPorts are created for every modulable parameter of a Mechanism, its `function `, any of its @@ -758,6 +758,17 @@ parameter:: This is because when the Compoistion was run, the ``control`` Mechanism modulated the value of the gain parameter. +Some Parameters may be modulable, but not *modulated* by a +ParameterPort, because ParameterPorts are only created for Parameters +whose values are numeric at the time of Component construction. For +example, `TransferMechanism.termination_threshold` has a default value +of None and will not have a ParameterPort by default. Similarly, when +`noise ` is set to a function or value +containing a function, it will not have an associated ParameterPort. + +In this case, dot notation for these modulable Parameters will behave +the same as for non-modulable Parameters. + .. *Initialization* ???XXX .. _BasicsAndPrimer_Monitoring_Values: diff --git a/docs/source/Compilation.rst b/docs/source/Compilation.rst index 3ae6ec0528d..ce9d9e38471 100644 --- a/docs/source/Compilation.rst +++ b/docs/source/Compilation.rst @@ -3,7 +3,11 @@ Compilation PsyNeulink includes a runtime compiler to improve performance of executed models. This section describes the overview of the compiler design and its use. -The performance improvements varies, but it has been observed to be between one and three orders of magnitude depending on the model. +The performance improvements varies, but it has been observed to be between +one and three orders of magnitude depending on the model. +See `Vesely et al. (2022) `_ +for additional information about the approach taken to compilation, and +`Composition_Compilation` for it use in executing a `Composition`. Overview diff --git a/docs/source/Composition.rst b/docs/source/Composition.rst index 577483ab212..5bad64708e7 100644 --- a/docs/source/Composition.rst +++ b/docs/source/Composition.rst @@ -10,6 +10,8 @@ Composition AutodiffComposition CompositionFunctionApproximator + ParameterEstimationComposition + | .. container:: related @@ -27,6 +29,6 @@ Composition Report .. 
automodule:: psyneulink.core.compositions.composition - :members: Composition, NodeRole + :members: Composition, NodeRole, Graph :private-members: - :exclude-members: Parameters, show_structure, CompositionError + :exclude-members: Parameters, show_structure, CompositionError, get_inputs_format diff --git a/docs/source/CompositionInterfaceMechanism.rst b/docs/source/CompositionInterfaceMechanism.rst index 0eed4e09f4c..7a9b18233b0 100644 --- a/docs/source/CompositionInterfaceMechanism.rst +++ b/docs/source/CompositionInterfaceMechanism.rst @@ -1,7 +1,7 @@ CompositionInterfaceMechanism ============================= -.. automodule:: psyneulink.library.components.mechanisms.processing.compositioninterfacemechanism +.. automodule:: psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism :members: :private-members: :exclude-members: random diff --git a/docs/source/Compositions.rst b/docs/source/Compositions.rst index 7e279020f9b..233a252c0ae 100644 --- a/docs/source/Compositions.rst +++ b/docs/source/Compositions.rst @@ -8,4 +8,5 @@ Subclasses of `Core` `Composition` that implement specialized operations. AutodiffComposition CompositionFunctionApproximator + ParameterEstimationComposition diff --git a/docs/source/Core.rst b/docs/source/Core.rst index b328536c6d3..e66bd4c40ee 100644 --- a/docs/source/Core.rst +++ b/docs/source/Core.rst @@ -3,13 +3,87 @@ Core * `Component` - `Mechanism` + + - `ProcessingMechanism` + + - `TransferMechanism` + + - `IntegratorMechanism` + + - `ObjectiveMechanism` + + + - `ModulatoryMechanism` + + - `ControlMechanism` + + - `LearningMechanism` + - `Projection` + + - `PathwayProjection` + + - `MappingProjection` + + - `MaskedMappingProjection` + + - `AutoAssociativeProjection` + + - `ModulatoryProjection` + + - `LearningProjection` + + - `ControlProjection` + + - `GatingProjection` + - `Port` + + - `InputPort` + + - `ParameterPort` + + - `OutputPort` + + - `ModulatorySignal` + + - `LearningSignal` + + - `ControlSignal` + + - `GatingSignal` + - `Function` + - `NonStatefulFunctions` + + - `CombinationFunctions` + + - `DistributionFunctions` + + - `LearningFunctions` + + - `ObjectiveFunctions` + + - `OptimizationFunctions` + + - `SelectionFunctions` + + - `TransferFunctions` + + - `StatefulFunctions` + + - `IntegratorFunctions` + + - `MemoryFunctions` + + - `UserDefinedFunction` + + * `Composition` - `AutodiffComposition` - `CompositionFunctionApproximator` + - `ParameterEstimationComposition` * `Services` - `Registry` diff --git a/docs/source/IntegratorFunctions.rst b/docs/source/IntegratorFunctions.rst index caad95e4a0e..a8f2387ac86 100644 --- a/docs/source/IntegratorFunctions.rst +++ b/docs/source/IntegratorFunctions.rst @@ -4,7 +4,7 @@ IntegratorFunctions .. toctree:: :maxdepth: 3 -.. automodule:: psyneulink.core.components.functions.statefulfunctions.integratorfunctions +.. automodule:: psyneulink.core.components.functions.stateful.integratorfunctions :members: :private-members: :exclude-members: Parameters diff --git a/docs/source/LearningMechanism.rst b/docs/source/LearningMechanism.rst index d01589dac27..a848d6d5443 100644 --- a/docs/source/LearningMechanism.rst +++ b/docs/source/LearningMechanism.rst @@ -1,5 +1,5 @@ -Learning Mechanism -================== +LearningMechanism +================= .. 
automodule:: psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism :members: diff --git a/docs/source/MemoryFunctions.rst b/docs/source/MemoryFunctions.rst index 01484defc0d..a1171924052 100644 --- a/docs/source/MemoryFunctions.rst +++ b/docs/source/MemoryFunctions.rst @@ -4,7 +4,7 @@ MemoryFunctions .. toctree:: :maxdepth: 3 -.. automodule:: psyneulink.core.components.functions.statefulfunctions.memoryfunctions +.. automodule:: psyneulink.core.components.functions.stateful.memoryfunctions :members: :private-members: :exclude-members: Parameters, _validate_params, _validate, _initialize_previous_value, _function, _store_memory, _parse_memories, _is_duplicate, _get_distance, _update diff --git a/docs/source/OptimizationControlMechanism.rst b/docs/source/OptimizationControlMechanism.rst index cb9f824d175..371812fb1a6 100644 --- a/docs/source/OptimizationControlMechanism.rst +++ b/docs/source/OptimizationControlMechanism.rst @@ -4,4 +4,4 @@ OptimizationControlMechanism .. automodule:: psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism :members: :private-members: - :exclude-members: Linear, random, Parameters + :exclude-members: Linear, random, Parameters, OptimizationControlMechanismError diff --git a/docs/source/ParameterEstimationComposition.rst b/docs/source/ParameterEstimationComposition.rst new file mode 100644 index 00000000000..b0a50802329 --- /dev/null +++ b/docs/source/ParameterEstimationComposition.rst @@ -0,0 +1,10 @@ +ParameterEstimationComposition +============================== + +.. toctree:: + :maxdepth: 2 + +.. automodule:: psyneulink.core.compositions.parameterestimationcomposition + :members: + :private-members: + :exclude-members: _validate_params, run, evaluate diff --git a/docs/source/StatefulFunction.rst b/docs/source/StatefulFunction.rst index ede81c12aac..5a8b4985fc4 100644 --- a/docs/source/StatefulFunction.rst +++ b/docs/source/StatefulFunction.rst @@ -4,7 +4,7 @@ StatefulFunction .. toctree:: :maxdepth: 3 -.. automodule:: psyneulink.core.components.functions.statefulfunctions.statefulfunction +.. 
automodule:: psyneulink.core.components.functions.stateful.statefulfunction :members: :private-members: :exclude-members: Parameters diff --git a/docs/source/_static/CIM_FIgure.svg b/docs/source/_static/CIM_FIgure.svg new file mode 100644 index 00000000000..32818205963 --- /dev/null +++ b/docs/source/_static/CIM_FIgure.svg @@ -0,0 +1,1502 @@
[SVG markup omitted: new figure with panels A and B showing an OUTER COMPOSITION containing a NESTED COMPOSITION, their Input_CIM, Parameter_CIM, and Output_CIM mechanisms, INPUT/OUTPUT NODEs, Mechanisms A-F, and a CONTROL MECHANISM]
diff --git a/docs/source/_static/Modulation_Anatomy_fig.svg b/docs/source/_static/Modulation_Anatomy_fig.svg index 89d1d80a358..0f901251b77 100644 --- a/docs/source/_static/Modulation_Anatomy_fig.svg +++ b/docs/source/_static/Modulation_Anatomy_fig.svg @@ -1,2612 +1,1006 @@
[SVG markup omitted: regenerated figure of modulation anatomy -- GatingMechanism/GatingSignal, ControlMechanism/ControlSignal, and LearningMechanism/LearningSignal with their Projections to the InputPort, ParameterPort, and OutputPort of a ProcessingMechanism]
diff --git a/docs/source/_static/Optimization_fig.svg b/docs/source/_static/Optimization_fig.svg new file mode 100644 index 00000000000..46594b558a0 --- /dev/null +++ b/docs/source/_static/Optimization_fig.svg @@ -0,0 +1,3184 @@
[SVG markup omitted: new figure with panel A showing the OptimizationControlMechanism optimization cycle (OptimizationFunction with search_function, search_space, and objective_function; agent_rep as Composition or CompositionFunctionApproximator; evaluate_agent_rep, state_features, net_outcome, control_allocation) and panel B showing model-based control over a Composition of Mechanisms]
+ + + + + + + + + + + + + + + + + + Evaluation + + + + + + + + + + + + + + + + + + + + + + + + Processing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Objective + + + + + + + + + + + + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + “Model-f + r + ee” + + + + + + + + + + + + + + + + + + + + Features + + + + + + + + + + + + + + + + + + + + Outcome + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Optimization + + + + + + + + + + + Cont + r + ol + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + Simulation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Objective + + + + + + + + + + + + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + agent_rep + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + agent_rep + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Composition + + + + + + + + + + + Composition + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Optimization + + + + + + + + + + + Cont + r + ol + + + + + + + + + + + Mechanism + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Composition + + + + + + + + + + + + + + + + + + + + + + Function + + + + + + + + + + + + + + + + + + + + + Approximator + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Adaptation + + + + + + + + + diff --git a/docs/source/index.rst b/docs/source/index.rst index a794541a602..c02d7a7ddd6 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -120,11 +120,16 @@ characteristics that are often (at least in the initial stages of development) i interpreted vs. compiled). 
That said, priorities for ongoing development of PsyNeuLink are: - i) acceleration, using just-in-time compilation methods and parallelization (see `Compilation`); + i) acceleration, using just-in-time compilation methods and parallelization + (see `Compilation`, and `Vesely et al., 2022 `_); ii) enhancement of the API to facilitate wrapping modules from other packages for integration into the PsyNeuLink - environment (examples currently exist for Pytorch and Emergent); - iii) integration of tools for parameter estimation, model comparison and data fitting; and - iv) a graphic interface for the construction of models and realtime display of their execution. + environment (examples currently exist for `Pytorch `_ ) and translating into a standard + `Model Description Format (MDF) `_; + iii) integration of tools for parameter estimation, model comparison and data fitting + (see `ParameterEstimationComposition`); and + iv) a graphic interface for the construction of models and realtime display of their execution + (see `PsyNeuLinkView `_). + .. _Overview: @@ -244,37 +249,36 @@ Acknowledgements *(in alphabetical order)* -* **Allie Burton**, Princeton Neuroscience Institute, Princeton University +* **Allie Burton**, Princeton Neuroscience Institute, Princeton University (formerly) * **Laura Bustamante**, Princeton Neuroscience Institute, Princeton University * **Jonathan D. Cohen**, Princeton Neuroscience Institute, Princeton University -* **Samyak Gupta**, Department of Computer Science, Rutgers University +* **Samyak Gupta**, Department of Computer Science, Princeton University * **Abigail Hoskin**, Department of Psychology, Princeton University -* **Peter Johnson**, Princeton Neuroscience Institute, Princeton University +* **Peter Johnson**, Princeton Neuroscience Institute, Princeton University (formerly) * **Justin Junge**, Department of Psychology, Princeton University -* **Jeremy Lee**, Princeton Neuroscience Institute, Princeton University * **Qihong Lu**, Department of Psychology, Princeton University -* **Kristen Manning**, Princeton Neuroscience Institute, Princeton University +* **Kristen Manning**, Princeton Neuroscience Institute, Princeton University (formerly) * **Katherine Mantel**, Princeton Neuroscience Institute, Princeton University * **Lena Rosendahl**, Department of Mechanical and Aerospace Engineering, Princeton University -* **Dillon Smith**, Princeton Neuroscience Institute, Princeton University -* **Markus Spitzer**, Princeton Neuroscience Institute, Princeton University +* **Dillon Smith**, Princeton Neuroscience Institute, Princeton University (formerly) +* **Markus Spitzer**, Princeton Neuroscience Institute, Princeton University (formerly) * **David Turner**, Princeton Neuroscience Institute, Princeton University -* **Jan Vesely**, Department of Computer Science, Rutgers University -* **Changyan Wang**, Princeton Neuroscience Institute, Princeton University -* **Nate Wilson**, Princeton Neuroscience Institute, Princeton University +* **Jan Vesely**, Department of Computer Science, Rutgers University (formerly) +* **Changyan Wang**, Princeton Neuroscience Institute, Princeton University (formerly) +* **Nate Wilson**, Princeton Neuroscience Institute, Princeton University (formerly) With substantial and greatly appreciated assistance from: -* **Abhishek Bhattacharjee**, Department of Computer Science, Rutgers University +* **Abhishek Bhattacharjee**, Department of Computer Science, Yale University * **Mihai Capota**, Intel Labs, Intel Corporation * **Bryn Keller**, 
Intel Labs, Intel Corporation -* **Susan Liu**, Princeton Neuroscience Institute, Princeton University +* **Susan Liu**, Princeton Neuroscience Institute, Princeton University (formerly) * **Garrett McGrath**, Princeton Neuroscience Institute, Princeton University -* **Sebastian Musslick**, Princeton Neuroscience Institute, Princeton University +* **Sebastian Musslick**, Princeton Neuroscience Institute, Princeton University (formerly) * **Amitai Shenhav**, Cognitive, Linguistic, & Psychological Sciences, Brown University -* **Michael Shvartsman**, Princeton Neuroscience Institute, Princeton University +* **Michael Shvartsman**, Princeton Neuroscience Institute, Princeton University (formerly) * **Ben Singer**, Princeton Neuroscience Institute, Princeton University -* **Ted Willke**, Intel Labs, Intel Corporation +* **Ted Willke**, Brain Inspired Computing Lab, Intel Corporation *Support* ~~~~~~~~~ diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 44214c1e4da..548fe2cfdd8 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -498,7 +498,9 @@ import itertools import logging import numbers +import re import types +import typing import warnings from abc import ABCMeta from collections.abc import Iterable @@ -648,14 +650,16 @@ def make_parameter_property(param): def getter(self): p = getattr(self.parameters, param.name) - if p.modulable: + if p.port is not None: + assert p.modulable return getattr(self, _get_parametervalue_attr(p)) else: return p._get(self.most_recent_context) def setter(self, value): p = getattr(self.parameters, param.name) - if p.modulable: + if p.port is not None: + assert p.modulable warnings.warn( 'Setting parameter values directly using dot notation' ' may be removed in a future release. 
It is replaced with,' @@ -1099,8 +1103,6 @@ def __init__(self, Note: if parameter_validation is off, validation is suppressed (for efficiency) (Component class default = on) """ - self._handle_illegal_kwargs(**kwargs) - context = Context( source=ContextFlags.CONSTRUCTOR, execution_phase=ContextFlags.IDLE, @@ -1117,6 +1119,16 @@ def __init__(self, except (KeyError, TypeError): function_params = {} + # if function is string, assume any unknown kwargs are for the + # corresponding UDF expression + if isinstance(function, (types.FunctionType, str)): + function_params = { + **kwargs, + **function_params + } + else: + self._handle_illegal_kwargs(**kwargs) + # allow override of standard arguments with arguments specified in # params (here, param_defaults) argument # (if there are duplicates, later lines override previous) @@ -1147,6 +1159,14 @@ def __init__(self, self.defaults.variable = copy.deepcopy(default_variable) self.parameters.variable._user_specified = True + self.parameters.variable._set( + copy_parameter_value(default_variable), + context=context, + skip_log=True, + skip_history=True, + override=True + ) + # ASSIGN PREFS _assign_prefs(self, prefs, BasePreferenceSet) @@ -1324,7 +1344,12 @@ def _convert(p): x = p.get(context) if isinstance(x, np.random.RandomState): # Skip first element of random state (id string) - val = pnlvm._tupleize(x.get_state()[1:]) + val = pnlvm._tupleize((*x.get_state()[1:], x.used_seed[0])) + elif isinstance(x, np.random.Generator): + state = x.bit_generator.state + val = pnlvm._tupleize((state['state']['counter'], state['state']['key'], + state['buffer'], state['uinteger'], state['buffer_pos'], + state['has_uint32'], x.used_seed[0])) elif isinstance(x, Time): val = tuple(getattr(x, graph_scheduler.time._time_scale_to_attr_str(t)) for t in TimeScale) elif isinstance(x, Component): @@ -1348,13 +1373,14 @@ def _get_compilation_params(self): # Invalid types "input_port_variables", "results", "simulation_results", "monitor_for_control", "state_feature_values", "simulation_ids", - "input_labels_dict", "output_labels_dict", - "modulated_mechanisms", "grid", + "input_labels_dict", "output_labels_dict", "num_estimates", + "modulated_mechanisms", "grid", "control_signal_params", "activation_derivative_fct", "input_specification", # Reference to other components "objective_mechanism", "agent_rep", "projections", # Shape mismatch "auto", "hetero", "cost", "costs", "combined_costs", + "control_signal", # autodiff specific types "pytorch_representation", "optimizer"} # Mechanism's need few extra entires: @@ -1420,8 +1446,9 @@ def _convert(x): elif isinstance(x, Component): return x._get_param_initializer(context) - try: # This can't use tupleize and needs to recurse to handle - # 'search_space' list of SampleIterators + try: + # This can't use tupleize and needs to recurse to handle + # 'search_space' list of SampleIterators return tuple(_convert(i) for i in x) except TypeError: return x if x is not None else tuple() @@ -1431,8 +1458,8 @@ def _get_values(p): # Modulated parameters change shape to array if np.ndim(param) == 0 and self._is_param_modulated(p): return (param,) - elif p.name == 'num_estimates': - return 0 if param is None else param + elif p.name == 'num_trials_per_estimate': # Should always be int + return 0 if param is None else int(param) elif p.name == 'matrix': # Flatten matrix return tuple(np.asfarray(param).flatten()) return _convert(param) @@ -1684,16 +1711,8 @@ def _deferred_init(self, **kwargs): self._init_args.update(kwargs) # Complete initialization 
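The `__init__` hunk above changes how unrecognized constructor kwargs are handled: when the `function` argument is a plain Python callable or a string expression, they are folded into `function_params` (so they can become parameters of the wrapping UserDefinedFunction) rather than rejected as illegal. A minimal standalone sketch of that dispatch, using a hypothetical `resolve_function_kwargs` helper and an assumed string-expression syntax (neither is PsyNeuLink API)::

    import types

    def resolve_function_kwargs(function, function_params, kwargs):
        """Hypothetical helper mirroring the Component.__init__ logic above.

        If ``function`` is a string expression or a plain Python callable,
        unknown kwargs are assumed to be parameters for the UserDefinedFunction
        that will wrap it; otherwise they are treated as illegal.
        """
        if isinstance(function, (types.FunctionType, str)):
            # later entries win, so explicit function_params override kwargs
            return {**kwargs, **function_params}
        if kwargs:
            raise TypeError(f"Unrecognized argument(s): {', '.join(kwargs)}")
        return dict(function_params)

    # e.g. a string expression with an assumed free parameter 'gain'
    params = resolve_function_kwargs("gain * x", {}, {"gain": 2.0})
    assert params == {"gain": 2.0}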
- # MODIFIED 10/27/18 OLD: super(self.__class__,self).__init__(**self._init_args) - # MODIFIED 10/27/18 NEW: FOLLOWING IS NEEDED TO HANDLE FUNCTION DEFERRED INIT (JDC) - # try: - # super(self.__class__,self).__init__(**self._init_args) - # except: - # self.__init__(**self._init_args) - # MODIFIED 10/27/18 END - # If name was assigned, "[DEFERRED INITIALIZATION]" was appended to it, so remove it if DEFERRED_INITIALIZATION in self.name: self.name = self.name.replace("[" + DEFERRED_INITIALIZATION + "]", "") @@ -1947,6 +1966,7 @@ def _initialize_parameters(self, context=None, **param_defaults): Composition_Base, ComponentsMeta, types.MethodType, + types.ModuleType, functools.partial, ) alias_names = {p.name for p in self.class_parameters if isinstance(p, ParameterAlias)} @@ -2040,7 +2060,10 @@ def _is_user_specified(parameter): p.spec = copy_parameter_value(p.spec) # set default to None context to ensure it exists - if p.getter is None and p._get(context) is None: + if ( + p._get(context) is None and p.getter is None + or context.execution_id not in p.values + ): if p._user_specified: val = param_defaults[p.name] @@ -2536,10 +2559,10 @@ def _validate_params(self, request_set, target_set=None, context=None): inspect.isclass(param_value) and inspect.isclass(getattr(self.defaults, param_name)) and issubclass(param_value, getattr(self.defaults, param_name))): - # Assign instance to target and move on - # (compatiblity check no longer needed and can't handle function) - target_set[param_name] = param_value() - continue + # Assign instance to target and move on + # (compatiblity check no longer needed and can't handle function) + target_set[param_name] = param_value() + continue # Check if param value is of same type as one with the same name in defaults # don't worry about length @@ -2818,11 +2841,14 @@ def _instantiate_function(self, function, function_params=None, context=None): # Specification is a standard python function, so wrap as a UserDefnedFunction # Note: parameter_ports for function's parameters will be created in_instantiate_attributes_after_function - if isinstance(function, types.FunctionType): - function = UserDefinedFunction(default_variable=function_variable, - custom_function=function, - owner=self, - context=context) + if isinstance(function, (types.FunctionType, str)): + function = UserDefinedFunction( + default_variable=function_variable, + custom_function=function, + owner=self, + context=context, + **function_params, + ) # Specification is an already implemented Function elif isinstance(function, Function): @@ -3110,7 +3136,7 @@ def _parse_param_port_sources(self): self.parameter_ports.parameter_mapping[param_port.source] = param_port except TypeError: pass - param_port.source._port = param_port + param_port.source.port = param_port def _get_current_parameter_value(self, parameter, context=None): from psyneulink.core.components.ports.parameterport import ParameterPortError @@ -3513,6 +3539,81 @@ def _propagate_most_recent_context(self, context=None, visited=None): visited.add(obj) obj._propagate_most_recent_context(context, visited) + def all_dependent_parameters( + self, + filter_name: typing.Union[str, typing.Iterable[str]] = None, + filter_regex: typing.Union[str, typing.Iterable[str]] = None, + ): + """Dictionary of Parameters of this Component and its \ + `_dependent_components` filtered by **filter_name** and \ + **filter_regex**. If no filter is specified, all Parameters \ + are included. 
+ + Args: + filter_name (Union[str, Iterable[str]], optional): The \ + exact name or names of Parameters to include. Defaults \ + to None. + filter_regex (Union[str, Iterable[str]], optional): \ + Regular expression patterns. If any pattern matches a \ + Parameter name (using re.match), it will be included \ + in the result. Defaults to None. + + Returns: + dict[Parameter:Component]: Dictionary of filtered Parameters + """ + def _all_dependent_parameters(obj, filter_name, filter_regex, visited): + parameters = {} + + if isinstance(filter_name, str): + filter_name = [filter_name] + + if isinstance(filter_regex, str): + filter_regex = [filter_regex] + + if filter_name is not None: + filter_name = set(filter_name) + + try: + filter_regex = [re.compile(r) for r in filter_regex] + except TypeError: + pass + + for p in obj.parameters: + include = filter_name is None and filter_regex is None + + if filter_name is not None: + if p.name in filter_name: + include = True + + if not include and filter_regex is not None: + for r in filter_regex: + if r.match(p.name): + include = True + break + + # owner check is primarily for value parameter on + # Composition which is deleted in + # ba56af82585e2d61f5b5bd13d9a19b7ee3b60124 presumably + # for clarity (results is used instead) + if include and p._owner._owner is obj: + parameters[p] = obj + + for c in obj._dependent_components: + if c not in visited: + visited.add(c) + parameters.update( + _all_dependent_parameters( + c, + filter_name, + filter_regex, + visited + ) + ) + + return parameters + + return _all_dependent_parameters(self, filter_name, filter_regex, set()) + @property def _dict_summary(self): from psyneulink.core.compositions.composition import Composition @@ -3755,21 +3856,14 @@ def _update_parameter_components(self, context=None): # store all Components in Parameters to be used in # _dependent_components for _initialize_from_context for p in self.parameters: + param_value = p._get(context) try: - param_value = p._get(context) - try: - param_value = param_value.__self__ - except AttributeError: - pass + param_value = param_value.__self__ + except AttributeError: + pass - if isinstance(param_value, Component) and param_value is not self: - self._parameter_components.add(param_value) - # ControlMechanism and GatingMechanism have Parameters that only - # throw these errors - except Exception as e: - # cannot import the specific exceptions due to circularity - if 'attribute is not implemented on' not in str(e): - raise + if isinstance(param_value, Component) and param_value is not self: + self._parameter_components.add(param_value) @property def _dependent_components(self): @@ -3855,23 +3949,20 @@ def __repr__(self): @property def modulated(self): - try: - is_modulated = (self._parameter in self._owner.parameter_ports) - except AttributeError: - is_modulated = False - - try: - is_modulated = is_modulated or (self._parameter in self._owner.owner.parameter_ports) - except AttributeError: - pass - - if is_modulated: - return self._owner._get_current_parameter_value( + # TODO: consider making this + # self._parameter.port.is_modulated(self._owner.most_recent_context) + # because the port existing doesn't necessarily mean modulation + # is actually happening + if self._parameter.port is not None: + return self._parameter.port.owner._get_current_parameter_value( self._parameter, self._owner.most_recent_context ) else: - warnings.warn(f'{self._parameter.name} is not currently modulated.') + warnings.warn( + f'{self._parameter.name} is not currently 
modulated in most' + f' recent context {self._owner.most_recent_context}' + ) return None @modulated.setter diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index d1241cfba51..5b8cbb10ca7 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -162,7 +162,8 @@ from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.utilities import ( - convert_to_np_array, get_global_seed, object_has_single_value, parameter_spec, safe_len + convert_to_np_array, get_global_seed, object_has_single_value, parameter_spec, safe_len, + SeededRandomState ) __all__ = [ @@ -333,49 +334,38 @@ def _seed_setter(value, owning_component, context): if value in {None, DEFAULT_SEED}: value = get_global_seed() - value = int(value) - - owning_component.parameters.random_state._set( - np.random.RandomState([value]), - context - ) - - return value + # Remove any old PRNG state + owning_component.parameters.random_state.set(None, context=context) + return int(value) def _random_state_getter(self, owning_component, context): - seed_param = owning_component.parameters.seed + seed_param = owning_component.parameters.seed try: - is_modulated = seed_param._port.is_modulated(context) + is_modulated = seed_param.port.is_modulated(context) except AttributeError: - # no ParameterPort - pass + is_modulated = False + + if is_modulated: + seed_value = [int(owning_component._get_current_parameter_value(seed_param, context))] else: - if is_modulated: - # can manage reset_for_context only in getter because we - # don't want to store any copied values from other contexts - # (from _initialize_from_context) - try: - reset_for_context = self._reset_for_context[context.execution_id] - except AttributeError: - self._reset_for_context = {} - reset_for_context = False - except KeyError: - reset_for_context = False - - if not reset_for_context: - self._reset_for_context[context.execution_id] = True - return np.random.RandomState([ - int( - owning_component._get_current_parameter_value( - seed_param, - context - ) - ) - ]) - - return self.values[context.execution_id] + seed_value = [int(seed_param._get(context=context))] + + if seed_value == [DEFAULT_SEED]: + raise FunctionError( + "Invalid seed for {} in context: {} ({})".format( + owning_component, context.execution_id, seed_param + ) + ) + + current_state = self.values.get(context.execution_id, None) + if current_state is None: + return SeededRandomState(seed_value) + if current_state.used_seed != seed_value: + return type(current_state)(seed_value) + + return current_state class Function_Base(Function): @@ -616,11 +606,14 @@ def __deepcopy__(self, memo): new = super().__deepcopy__(memo) # ensure copy does not have identical name register_category(new, Function_Base, new.name, FunctionRegistry) - try: + if "random_state" in new.parameters: # HACK: Make sure any copies are re-seeded to avoid dependent RNG. 
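The reworked `_seed_setter`/`_random_state_getter` above implement lazy re-seeding: setting `seed` only clears the stored random state, and the getter rebuilds the PRNG when no state exists for the context or when its `used_seed` no longer matches the current seed. A self-contained NumPy sketch of that pattern — the cache class below is an illustrative stand-in, not PsyNeuLink's `SeededRandomState` or per-context Parameter storage::

    import numpy as np

    class LazySeededRNG:
        """Illustrative per-context cache of RandomState objects, rebuilt when the seed changes."""

        def __init__(self):
            self._states = {}   # context id -> (seed, RandomState)

        def get(self, context_id, seed):
            seed = [int(seed)]
            cached = self._states.get(context_id)
            if cached is None or cached[0] != seed:
                # no state yet for this context, or the seed changed: rebuild
                state = np.random.RandomState(seed)
                self._states[context_id] = (seed, state)
                return state
            return cached[1]

        def set_seed(self, context_id, new_seed):
            # mirrors _seed_setter: drop the old PRNG state; it is rebuilt on next access
            self._states.pop(context_id, None)
            return int(new_seed)

    rng = LazySeededRNG()
    a = rng.get("ctx-1", 42).uniform()
    rng.set_seed("ctx-1", 7)           # old state discarded
    b = rng.get("ctx-1", 7).uniform()  # rebuilt from the new seed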
- new.random_state.seed([get_global_seed()]) - except: - pass + # functions with "random_state" param must have "seed" parameter + for ctx in new.parameters.seed.values: + new.parameters.seed.set( + DEFAULT_SEED, ctx, skip_log=True, skip_history=True + ) + return new @handle_external_context() @@ -761,7 +754,7 @@ def owner_name(self): except AttributeError: return '' - def _is_identity(self, context=None): + def _is_identity(self, context=None, defaults=False): # should return True in subclasses if the parameters for context are such that # the Function's output will be the same as its input # Used to bypass execute when unnecessary @@ -773,8 +766,8 @@ def _model_spec_parameter_blacklist(self): 'multiplicative_param', 'additive_param', }) -# ***************************************** EXAMPLE FUNCTION ******************************************************* +# ***************************************** EXAMPLE FUNCTION ******************************************************* PROPENSITY = "PROPENSITY" PERTINACITY = "PERTINACITY" diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index fac23daaad1..3556e2b054c 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -44,7 +44,7 @@ PREDICTION_ERROR_DELTA_FUNCTION, PRODUCT, REARRANGE_FUNCTION, REDUCE_FUNCTION, SCALE, SUM, WEIGHTS, \ PREFERENCE_SET_NAME from psyneulink.core.globals.utilities import convert_to_np_array, is_numeric, np_array_less_than_2d, parameter_spec -from psyneulink.core.globals.context import Context, ContextFlags +from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, is_pref_set, PreferenceEntry, PreferenceLevel @@ -111,10 +111,10 @@ class Concatenate(CombinationFunction): # ------------------------------------- .. _Concatenate: - Concatenates items in outer dimension (axis 0) of of `variable ` into a single array, + Concatenates items in outer dimension (axis 0) of `variable ` into a single array, optionally scaling and/or adding an offset to the result after concatenating. - `function ` returns a 1d array with lenght equal to the sum of the lengths of the items + `function ` returns a 1d array with length equal to the sum of the lengths of the items in `variable `. Arguments @@ -858,6 +858,13 @@ def _function(self, return self.convert_output_type(result) + def _get_input_struct_type(self, ctx): + # FIXME: Workaround a special case of simple array. 
+ # It should just pass through to modifiers, which matches what + # single element 2d array does + default_var = np.atleast_2d(self.defaults.variable) + return ctx.convert_python_struct_to_llvm_ir(default_var) + def _gen_llvm_combine(self, builder, index, ctx, vi, vo, params): scale = self._gen_llvm_load_param(ctx, builder, params, SCALE, index, 1.0) offset = self._gen_llvm_load_param(ctx, builder, params, OFFSET, index, -0.0) diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index edc2bd70fae..aa6e6ef9ba1 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -36,7 +36,6 @@ ADDITIVE_PARAM, DIST_FUNCTION_TYPE, BETA, DIST_MEAN, DIST_SHAPE, DRIFT_DIFFUSION_ANALYTICAL_FUNCTION, \ EXPONENTIAL_DIST_FUNCTION, GAMMA_DIST_FUNCTION, HIGH, LOW, MULTIPLICATIVE_PARAM, NOISE, NORMAL_DIST_FUNCTION, \ SCALE, STANDARD_DEVIATION, THRESHOLD, UNIFORM_DIST_FUNCTION, WALD_DIST_FUNCTION -from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.utilities import convert_to_np_array, parameter_spec from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set @@ -152,7 +151,7 @@ class Parameters(DistributionFunction.Parameters): mean = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) standard_deviation = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) @tc.typecheck def __init__(self, @@ -196,11 +195,11 @@ def _function(self, return self.convert_output_type(result) def _gen_llvm_function_body(self, ctx, builder, params, state, _, arg_out, *, tags:frozenset): - random_state = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state") + random_state = ctx.get_random_state_ptr(builder, self, state, params) mean_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, "mean") std_dev_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, "standard_deviation") ret_val_ptr = builder.alloca(ctx.float_ty) - norm_rand_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_normal") + norm_rand_f = ctx.get_normal_dist_function_by_state(random_state) builder.call(norm_rand_f, [random_state, ret_val_ptr]) ret_val = builder.load(ret_val_ptr) @@ -331,7 +330,7 @@ class Parameters(DistributionFunction.Parameters): :type: ``float`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) variable = Parameter(np.array([0]), read_only=True, pnl_internal=True, constructor_argument='default_variable') mean = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) standard_deviation = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) @@ -460,7 +459,7 @@ class Parameters(DistributionFunction.Parameters): """ beta = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = 
Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) @tc.typecheck def __init__(self, @@ -585,7 +584,7 @@ class Parameters(DistributionFunction.Parameters): low = Parameter(0.0, modulable=True) high = Parameter(1.0, modulable=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) @tc.typecheck def __init__(self, @@ -620,6 +619,28 @@ def _function(self, return self.convert_output_type(result) + def _gen_llvm_function_body(self, ctx, builder, params, state, _, arg_out, *, tags:frozenset): + random_state = ctx.get_random_state_ptr(builder, self, state, params) + low_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, LOW) + high_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, HIGH) + ret_val_ptr = builder.alloca(ctx.float_ty) + norm_rand_f = ctx.get_uniform_dist_function_by_state(random_state) + builder.call(norm_rand_f, [random_state, ret_val_ptr]) + + ret_val = builder.load(ret_val_ptr) + high = pnlvm.helpers.load_extract_scalar_array_one(builder, high_ptr) + low = pnlvm.helpers.load_extract_scalar_array_one(builder, low_ptr) + scale = builder.fsub(high, low) + + ret_val = builder.fmul(ret_val, scale) + ret_val = builder.fadd(ret_val, low) + + while isinstance(arg_out.type.pointee, pnlvm.ir.ArrayType): + assert len(arg_out.type.pointee) == 1 + arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + builder.store(ret_val, arg_out) + return builder + class GammaDist(DistributionFunction): """ @@ -718,7 +739,7 @@ class Parameters(DistributionFunction.Parameters): :type: ``float`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) dist_shape = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM]) @@ -852,7 +873,7 @@ class Parameters(DistributionFunction.Parameters): :type: ``float`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) mean = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM]) diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 2cfb8d330e9..c00959d8f6b 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -419,7 +419,7 @@ class Parameters(LearningFunction.Parameters): :type: ``int`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) variable = Parameter([np.array([0, 0, 0]), np.array([0])], read_only=True, @@ -1393,15 +1393,24 @@ def _function(self, def _activation_input_getter(owning_component=None, context=None): - return 
owning_component.parameters.variable._get(context)[LEARNING_ACTIVATION_INPUT] + try: + return owning_component.parameters.variable._get(context)[LEARNING_ACTIVATION_INPUT] + except (AttributeError, TypeError): + return None def _activation_output_getter(owning_component=None, context=None): - return owning_component.parameters.variable._get(context)[LEARNING_ACTIVATION_OUTPUT] + try: + return owning_component.parameters.variable._get(context)[LEARNING_ACTIVATION_OUTPUT] + except (AttributeError, TypeError): + return None def _error_signal_getter(owning_component=None, context=None): - return owning_component.parameters.variable._get(context)[LEARNING_ERROR_OUTPUT] + try: + return owning_component.parameters.variable._get(context)[LEARNING_ERROR_OUTPUT] + except (AttributeError, TypeError): + return None diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index e4dcfbc07d5..286cf63a86e 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -28,11 +28,10 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import DefaultsFlexibility from psyneulink.core.components.functions.function import EPSILON, FunctionError, Function_Base, get_matrix -from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import \ CORRELATION, COSINE, CROSS_ENTROPY, \ DEFAULT_VARIABLE, DIFFERENCE, DISTANCE_FUNCTION, DISTANCE_METRICS, DistanceMetrics, \ - ENERGY, ENTROPY, EUCLIDEAN, HOLLOW_MATRIX, MATRIX, MAX_ABS_DIFF, METRIC, \ + ENERGY, ENTROPY, EUCLIDEAN, HOLLOW_MATRIX, MATRIX, MAX_ABS_DIFF, \ NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, SIZE, STABILITY_FUNCTION from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index c3880a25b29..15bae402ebb 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -53,13 +53,15 @@ from psyneulink.core.globals.utilities import call_with_pruned_args __all__ = ['OptimizationFunction', 'GradientOptimization', 'GridSearch', 'GaussianProcess', 'ParamEstimationFunction', - 'ASCENT', 'DESCENT', 'DIRECTION', 'MAXIMIZE', 'MINIMIZE', 'OBJECTIVE_FUNCTION', - 'SEARCH_FUNCTION', 'SEARCH_SPACE', 'SEARCH_TERMINATION_FUNCTION', 'SIMULATION_PROGRESS' + 'ASCENT', 'DESCENT', 'DIRECTION', 'MAXIMIZE', 'MINIMIZE', 'OBJECTIVE_FUNCTION', 'SEARCH_FUNCTION', + 'SEARCH_SPACE', 'RANDOMIZATION_DIMENSION', 'SEARCH_TERMINATION_FUNCTION', 'SIMULATION_PROGRESS' ] OBJECTIVE_FUNCTION = 'objective_function' +AGGREGATION_FUNCTION = 'aggregation_function' SEARCH_FUNCTION = 'search_function' SEARCH_SPACE = 'search_space' +RANDOMIZATION_DIMENSION = 'randomization_dimension' SEARCH_TERMINATION_FUNCTION = 'search_termination_function' DIRECTION = 'direction' SIMULATION_PROGRESS = 'simulation_progress' @@ -69,6 +71,13 @@ def __init__(self, error_value): self.error_value = error_value +def _num_estimates_getter(owning_component, context): + if owning_component.parameters.randomization_dimension._get(context) is None: + return 1 + else: + return 
owning_component.parameters.search_space._get(context)[owning_component.randomization_dimension].num + + class OptimizationFunction(Function_Base): """ OptimizationFunction( \ @@ -76,6 +85,7 @@ class OptimizationFunction(Function_Base): objective_function=lambda x:0, \ search_function=lambda x:x, \ search_space=[0], \ + randomization_dimension=None, \ search_termination_function=lambda x,y,z:True, \ save_samples=False, \ save_values=False, \ @@ -102,18 +112,22 @@ class OptimizationFunction(Function_Base): When `function ` is executed, it iterates over the following steps: - get sample from `search_space ` by calling `search_function - ` + `; + .. + - estimate the value of `objective_function ` for the sample + by calling `objective_function ` the number of times + specified in its `num_estimates ` attribute; .. - - compute value of `objective_function ` for the sample - by calling `objective_function `; + - aggregate value of the estimates using `aggregation_function ` + (the default is to average the values; if `aggregation_function ` + is not specified, the entire list of estimates is returned); .. - - report value returned by `objective_function ` for the sample - by calling `report_value `; + - report the aggregated value for the sample by calling `report_value `; .. - evaluate `search_termination_function `. - The current iteration numberris contained in `iteration `. Iteration continues until - all values of `search_space ` have been evaluated and/or + The current iteration number is contained in `iteration `. Iteration continues + until all values of `search_space ` have been evaluated and/or `search_termination_function ` returns `True`. The `function ` returns: @@ -146,18 +160,17 @@ class OptimizationFunction(Function_Base): the arguments of an OptimizationFunction or its subclasses; this can be suppressed by specifying the relevant argument(s) as `NotImplemnted`. - COMMENT: - NOTES TO DEVELOPERS: - - Constructors of subclasses should include **kwargs in their constructor method, to accomodate arguments required - by some subclasses but not others (e.g., search_space needed by `GridSearch` but not `GradientOptimization`) so - that subclasses can be used interchangeably by OptimizationMechanisms. + .. technical_note:: + - Constructors of subclasses should include **kwargs in their constructor method, to accomodate arguments + required by some subclasses but not others (e.g., search_space needed by `GridSearch` but not + `GradientOptimization`) so that subclasses can be used interchangeably by OptimizationControlMechanism. - - Subclasses with attributes that depend on one of the OptimizationFunction's parameters should implement the - `reset ` method, that calls super().reset(*args) and then - reassigns the values of the dependent attributes accordingly. If an argument is not needed for the subclass, - `NotImplemented` should be passed as the argument's value in the call to super (i.e., the OptimizationFunction's - constructor). - COMMENT + - Subclasses with attributes that depend on one of the OptimizationFunction's parameters should implement the + `reset ` method, that calls super().reset(*args) and then + reassigns the values of the dependent attributes accordingly. If an argument is not needed for the subclass, + `NotImplemented` should be passed as the argument's value in the call to super (i.e., + the OptimizationFunction's + constructor). Arguments @@ -168,9 +181,18 @@ class OptimizationFunction(Function_Base): `objective_function `. 
objective_function : function or method : default None - specifies function used to evaluate sample in each iteration of the `optimization process - `; if it is not specified, a default function is used that simply returns - the value passed as its `variable ` parameter (see `note + specifies function used to make a single estimate for a sample, `num_estimates + ` of which are made for a given sample in each iteration of the + `optimization process `; if it is not specified, a default function is used + that simply returns the value passed as its `variable ` parameter (see `note + `). + + aggregation_function : function or method : default None + specifies function used to aggregate the values returned over the `num_estimates + ` calls to the `objective_function + ` for a given sample in each iteration of the `optimization + process `; if it is not specified, a default function is used that simply + returns the value passed as its `variable ` parameter (see `note `). search_function : function or method : default None @@ -190,6 +212,10 @@ class OptimizationFunction(Function_Base): executes exactly once using the value passed as its `variable ` parameter (see `note `). + randomization_dimension : int + specifies the index of `search_space ` containing the seeds for use in + randomization over each estimate of a sample (see `num_estimates `). + search_termination_function : function or method : None specifies function used to terminate iterations of the `optimization process `. It must return a boolean value, and it **must be specified** if the @@ -231,11 +257,29 @@ class OptimizationFunction(Function_Base): `objective_function ` in each iteration of the `optimization process `. The number of SampleIterators in the list determines the dimensionality of each sample: in each iteration of the `optimization process `, each - SampleIterator is called upon to provide the value for one of the dimensions of the sample.m`NotImplemented` + SampleIterator is called upon to provide the value for one of the dimensions of the sample if the `objective_function ` generates its own samples. If it is required and not specified, the optimization process executes exactly once using the value passed as its `variable ` parameter (see `note `). + randomization_dimension : int or None + the index of `search_space ` containing the seeds for use in randomization + over each estimate of a sample (see `num_estimates `); if num_estimates + is not specified, this is None, and only a single estimate is made for each sample. + + num_estimates : int or None + the number of independent estimates evaluated (i.e., calls made to the OptimizationFunction's + `objective_function ` for each sample, and aggregated over + by its `aggregation_function ` to determine the estimated value + for a given sample. This is determined from the `search_space ` by + accessing its `randomization_dimension ` and determining the + the length of (i.e., number of elements specified for) that dimension. + + aggregation_function : function or method + used to aggregate the values returned over the `num_estimates ` calls to + the `objective_function ` for a given sample in each iteration of + the `optimization process `. + search_termination_function : function or method that returns a boolean value used to terminate iterations of the `optimization process `; if it is required and not specified, the optimization process executes exactly once (see `note `). 
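The optimization process documented above — draw a sample with `search_function`, make `num_estimates` calls to `objective_function`, aggregate them with `aggregation_function` (averaging by default), report the result, and stop when `search_termination_function` returns True — amounts to a short loop. A pure-Python sketch of that control flow, with placeholder callables (the quadratic example at the end is illustrative only)::

    def optimize(initial_sample, objective_function, search_function,
                 search_termination_function, num_estimates=1,
                 aggregation_function=lambda values, n: sum(values) / n):
        """Sketch of the OptimizationFunction iteration described above."""
        sample, value, iteration = initial_sample, None, 0
        samples, values = [], []
        while not search_termination_function(sample, value, iteration):
            sample = search_function(sample, iteration)
            # num_estimates independent estimates for this sample ...
            estimates = [objective_function(sample) for _ in range(num_estimates)]
            # ... aggregated into a single value (averaged by default)
            value = aggregation_function(estimates, num_estimates)
            samples.append(sample)
            values.append(value)
            iteration += 1
        return sample, value, samples, values

    # e.g., a noiseless quadratic objective, searched by decrementing the sample
    result = optimize(
        initial_sample=5,
        objective_function=lambda x: -(x - 2) ** 2,
        search_function=lambda x, i: x - 1,
        search_termination_function=lambda x, v, i: i >= 5,
    )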
@@ -278,12 +322,23 @@ class Parameters(Function_Base.Parameters): :default value: None :type: + num_estimates + see `num_estimates ` + + :default value: None + :type: ``int`` + objective_function see `objective_function ` :default value: lambda x: 0 :type: ``types.FunctionType`` + randomization_dimension + see `randomization_dimension ` + :default value: None + :type: ``int`` + save_samples see `save_samples ` @@ -331,9 +386,14 @@ class Parameters(Function_Base.Parameters): variable = Parameter(np.array([0, 0, 0]), read_only=True, pnl_internal=True, constructor_argument='default_variable') objective_function = Parameter(lambda x: 0, stateful=False, loggable=False) + aggregation_function = Parameter(lambda x,n: sum(x) / n, stateful=False, loggable=False) search_function = Parameter(lambda x: x, stateful=False, loggable=False) search_termination_function = Parameter(lambda x, y, z: True, stateful=False, loggable=False) search_space = Parameter([SampleIterator([0])], stateful=False, loggable=False) + randomization_dimension = Parameter(None, stateful=False, loggable=False) + num_estimates = Parameter(None, stateful=True, loggable=True, read_only=True, + dependencies=[randomization_dimension, search_space], + getter=_num_estimates_getter) save_samples = Parameter(False, pnl_internal=True) save_values = Parameter(False, pnl_internal=True) @@ -349,8 +409,10 @@ def __init__( self, default_variable=None, objective_function:tc.optional(is_function_type)=None, + aggregation_function:tc.optional(is_function_type)=None, search_function:tc.optional(is_function_type)=None, search_space=None, + randomization_dimension=None, search_termination_function:tc.optional(is_function_type)=None, save_samples:tc.optional(bool)=None, save_values:tc.optional(bool)=None, @@ -367,12 +429,17 @@ def __init__( if objective_function is None: self._unspecified_args.append(OBJECTIVE_FUNCTION) + if aggregation_function is None: + self._unspecified_args.append(AGGREGATION_FUNCTION) + if search_function is None: self._unspecified_args.append(SEARCH_FUNCTION) if search_termination_function is None: self._unspecified_args.append(SEARCH_TERMINATION_FUNCTION) + self.randomization_dimension = randomization_dimension + super().__init__( default_variable=default_variable, save_samples=save_samples, @@ -380,6 +447,7 @@ def __init__( max_iterations=max_iterations, search_space=search_space, objective_function=objective_function, + aggregation_function=aggregation_function, search_function=search_function, search_termination_function=search_termination_function, params=params, @@ -399,6 +467,12 @@ def _validate_params(self, request_set, target_set=None, context=None): format(repr(OBJECTIVE_FUNCTION), self.__class__.__name__, request_set[OBJECTIVE_FUNCTION].__name__)) + if AGGREGATION_FUNCTION in request_set and request_set[AGGREGATION_FUNCTION] is not None: + if not is_function_type(request_set[AGGREGATION_FUNCTION]): + raise OptimizationFunctionError("Specification of {} arg for {} ({}) must be a function or method". + format(repr(AGGREGATION_FUNCTION), self.__class__.__name__, + request_set[AGGREGATION_FUNCTION].__name__)) + if SEARCH_FUNCTION in request_set and request_set[SEARCH_FUNCTION] is not None: if not is_function_type(request_set[SEARCH_FUNCTION]): raise OptimizationFunctionError("Specification of {} arg for {} ({}) must be a function or method". 
@@ -437,9 +511,11 @@ def reset( self, default_variable=None, objective_function=None, + aggregation_function=None, search_function=None, search_termination_function=None, search_space=None, + randomization_dimension=None, context=None ): """Reset parameters of the OptimizationFunction @@ -457,6 +533,8 @@ def reset( request_set={ 'default_variable': default_variable, 'objective_function': objective_function, + 'aggregation_function': aggregation_function, + RANDOMIZATION_DIMENSION : randomization_dimension, 'search_function': search_function, 'search_termination_function': search_termination_function, 'search_space': search_space, @@ -469,6 +547,10 @@ def reset( self.parameters.objective_function._set(objective_function, context) if OBJECTIVE_FUNCTION in self._unspecified_args: del self._unspecified_args[self._unspecified_args.index(OBJECTIVE_FUNCTION)] + if aggregation_function is not None: + self.parameters.aggregation_function._set(aggregation_function, context) + if AGGREGATION_FUNCTION in self._unspecified_args: + del self._unspecified_args[self._unspecified_args.index(AGGREGATION_FUNCTION)] if search_function is not None: self.parameters.search_function._set(search_function, context) if SEARCH_FUNCTION in self._unspecified_args: @@ -481,6 +563,8 @@ def reset( self.parameters.search_space._set(search_space, context) if SEARCH_SPACE in self._unspecified_args: del self._unspecified_args[self._unspecified_args.index(SEARCH_SPACE)] + if randomization_dimension is not None: + self.parameters.randomization_dimension._set(randomization_dimension, context) def _function(self, variable=None, @@ -504,7 +588,6 @@ def _function(self, second list contains the values returned by `objective_function ` for all the samples in the order they were evaluated; otherwise it is empty. """ - if self._unspecified_args and self.initialization_status == ContextFlags.INITIALIZED: warnings.warn("The following arg(s) were not specified for {}: {} -- using default(s)". format(self.name, ', '.join(self._unspecified_args))) @@ -526,7 +609,6 @@ def _function(self, # Set up progress bar _show_progress = False - from psyneulink.core.compositions.report import ReportOutput if hasattr(self, OWNER) and self.owner and self.owner.prefs.reportOutputPref is SIMULATION_PROGRESS: _show_progress = True _progress_bar_char = '.' 
@@ -539,11 +621,11 @@ def _function(self, format(self.owner.name, repr(_progress_bar_char), _progress_bar_rate_str, _search_space_size)) _progress_bar_count = 0 # Iterate optimization process + while not call_with_pruned_args(self.search_termination_function, current_sample, current_value, iteration, context=context): - if _show_progress: increment_progress_bar = (_progress_bar_rate < 1) or not (_progress_bar_count % _progress_bar_rate) if increment_progress_bar: @@ -552,8 +634,14 @@ def _function(self, # Get next sample of sample new_sample = call_with_pruned_args(self.search_function, current_sample, iteration, context=context) - # Compute new value based on new sample - new_value = call_with_pruned_args(self.objective_function, new_sample, context=context) + + # Generate num_estimates of sample, then apply aggregation_function and return result + estimates = [] + num_estimates = self.num_estimates + for i in range(num_estimates): + estimate = call_with_pruned_args(self.objective_function, new_sample, context=context) + estimates.append(estimate) + new_value = self.aggregation_function(estimates, num_estimates) if self.aggregation_function else estimates self._report_value(new_value) iteration += 1 max_iterations = self.parameters.max_iterations._get(context) @@ -578,6 +666,29 @@ def _report_value(self, new_value): pass +class GridBasedOptimizationFunction(OptimizationFunction): + """Implement helper method for parallelizing instantiation for evaluating samples from search space.""" + + def _grid_evaluate(self, ocm, context): + + assert ocm is ocm.agent_rep.controller + # Compiled evaluate expects the same variable as mech function + variable = [input_port.parameters.value.get(context) for input_port in ocm.input_ports] + num_evals = np.prod([d.num for d in self.search_space]) + + # Map allocations to values + comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, [context.execution_id]) + execution_mode = ocm.parameters.comp_execution_mode._get(context) + if execution_mode == "PTX": + outcomes = comp_exec.cuda_evaluate(variable, num_evals) + elif execution_mode == "LLVM": + outcomes = comp_exec.thread_evaluate(variable, num_evals) + else: + assert False, f"Unknown execution mode for {ocm.name}: {execution_mode}." + + return outcomes, num_evals + + ASCENT = 'ascent' DESCENT = 'descent' @@ -1121,7 +1232,7 @@ def _convergence_condition(self, variable, value, iteration, context=None): MINIMIZE = 'minimize' -class GridSearch(OptimizationFunction): +class GridSearch(GridBasedOptimizationFunction): """ GridSearch( \ default_variable=None, \ @@ -1152,7 +1263,7 @@ class GridSearch(OptimizationFunction): `search_space 2` is contained in `num_iterations `). Iteration continues until all values in `search_space ` have been evaluated (i.e., `num_iterations ` is reached), or `max_iterations ` is - execeeded. The function returns the sample that yielded either the highest (if `direction ` + exceeded. 
The function returns the sample that yielded either the highest (if `direction ` is *MAXIMIZE*) or lowest (if `direction ` is *MINIMIZE*) value of the `objective_function `, along with the value for that sample, as well as lists containing all of the samples evaluated and their values if either `save_samples ` or `save_values @@ -1275,7 +1386,7 @@ class Parameters(OptimizationFunction.Parameters): save_samples = Parameter(True, pnl_internal=True) save_values = Parameter(True, pnl_internal=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) select_randomly_from_optimal_values = Parameter(False) direction = MAXIMIZE @@ -1489,7 +1600,7 @@ def _gen_llvm_select_min_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:fr # Roll a dice to see if we should replace the current min prob = b.fdiv(opt_count.type(1), opt_count) - rand_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_double") + rand_f = ctx.get_uniform_dist_function_by_state(random_state) b.call(rand_f, [random_state, rand_out_ptr]) rand_out = b.load(rand_out_ptr) replace = b.fcmp_ordered("<", rand_out, prob) @@ -1601,20 +1712,9 @@ def _gen_llvm_function_body(self, ctx, builder, params, state_features, arg_in, return builder def _run_grid(self, ocm, variable, context): - assert ocm is ocm.agent_rep.controller - # Compiled evaluate expects the same variable as mech function - new_variable = [ip.parameters.value.get(context) for ip in ocm.input_ports] - num_evals = np.prod([d.num for d in self.search_space]) - # Map allocations to values - comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, [context.execution_id]) - variant = ocm.parameters.comp_execution_mode._get(context) - if variant == "PTX": - ct_values = comp_exec.cuda_evaluate(new_variable, num_evals) - elif variant == "LLVM": - ct_values = comp_exec.thread_evaluate(new_variable, num_evals) - else: - assert False, "Unknown OCM execution variant: {}".format(variant) + # "ct" => c-type variables + ct_values, num_evals = self._grid_evaluate(ocm, context) assert len(ct_values) == num_evals # Reduce array of values to min/max @@ -1633,7 +1733,7 @@ def _run_grid(self, ocm, variable, context): bin_func(ct_param, ct_state, ct_opt_sample, ct_alloc, ct_opt_value, ct_values, ct_opt_count, ct_start, ct_stop) - return ct_opt_sample, ct_opt_value, ct_values + return np.ctypeslib.as_array(ct_opt_sample), ct_opt_value.value, np.ctypeslib.as_array(ct_values) def _function(self, variable=None, @@ -1691,7 +1791,6 @@ def _function(self, # Set up progress bar _show_progress = False - from psyneulink.core.compositions.report import ReportOutput if hasattr(self, OWNER) and self.owner and self.owner.prefs.reportOutputPref is SIMULATION_PROGRESS: _show_progress = True _progress_bar_char = '.' 
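As shown above, `_run_grid` now converts the ctypes buffers returned by the compiled `_grid_evaluate` path into NumPy arrays and a plain Python float before returning them. A small standalone sketch of that conversion (illustrative values only, independent of PsyNeuLink):

    import ctypes
    import numpy as np

    # Stand-ins for the ctypes buffers produced by the compiled evaluate path
    ct_values = (ctypes.c_double * 4)(0.5, 1.5, 2.5, 3.5)
    ct_opt_value = ctypes.c_double(3.5)

    values = np.ctypeslib.as_array(ct_values)   # ndarray view over the ctypes buffer
    opt_value = ct_opt_value.value              # plain Python float

    print(values, opt_value)                    # [0.5 1.5 2.5 3.5] 3.5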
@@ -1762,14 +1861,24 @@ def _function(self, ocm = self._get_optimized_controller() - if ocm is not None and \ - (ocm.parameters.comp_execution_mode._get(context) == "PTX" or - ocm.parameters.comp_execution_mode._get(context) == "LLVM"): - opt_sample, opt_value, all_values = self._run_grid(ocm, variable, context) - # This should not be evaluated unless needed - all_samples = [itertools.product(*self.search_space)] - value_optimal = opt_value - sample_optimal = opt_sample + + # Compiled version + if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}: + opt_sample, opt_value, all_values = self._run_grid(ocm, variable, context) + # This should not be evaluated unless needed + all_samples = [s for s in itertools.product(*self.search_space)] + value_optimal = opt_value + sample_optimal = opt_sample + + # These are normally stored in the parent function (OptimizationFunction). + # Since we didn't call super()._function like the python path, + # save the values here + if self.parameters.save_samples._get(context): + self.parameters.saved_samples._set(all_samples, context) + if self.parameters.save_values._get(context): + self.parameters.saved_values._set(all_values, context) + + # Python version else: last_sample, last_value, all_samples, all_values = super()._function( variable=variable, @@ -2241,7 +2350,7 @@ class Parameters(OptimizationFunction.Parameters): """ variable = Parameter([[0], [0]], read_only=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) save_samples = True save_values = True @@ -2377,7 +2486,7 @@ def simulator(*args, **kwargs): # FIXME: This doesn't work at the moment. Need to use for loop below. # The batch_size is the number of estimates/simulations, set it on the # optimization control mechanism. 
- # self.owner.parameters.num_estimates.set(batch_size, execution_id) + # self.owner.parameters.num_trials_per_estimate.set(batch_size, execution_id) # Run batch_size simulations of the PsyNeuLink composition results = [] diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index a346d066c00..56d95e09ac2 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -35,7 +35,7 @@ ) from psyneulink.core.globals.keywords import \ MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, \ - MODE, ONE_HOT_FUNCTION, PARAMETER_PORT_PARAMS, PROB, PROB_INDICATOR, SELECTION_FUNCTION_TYPE, PREFERENCE_SET_NAME + MODE, ONE_HOT_FUNCTION, PROB, PROB_INDICATOR, SELECTION_FUNCTION_TYPE, PREFERENCE_SET_NAME from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, is_pref_set @@ -188,7 +188,7 @@ class Parameters(SelectionFunction.Parameters): """ mode = Parameter(MAX_VAL, stateful=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) def _validate_mode(self, mode): options = {MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, @@ -253,15 +253,15 @@ def _validate_params(self, request_set, target_set=None, context=None): "array of probabilities that sum to 1". format(MODE, self.__class__.__name__, Function.__name__, PROB, prob_dist)) - def _gen_llvm_function_body(self, ctx, builder, _, state, arg_in, arg_out, *, tags:frozenset): + def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): idx_ptr = builder.alloca(ctx.int32_ty) builder.store(ctx.int32_ty(0), idx_ptr) if self.mode in {PROB, PROB_INDICATOR}: - rng_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_double") dice_ptr = builder.alloca(ctx.float_ty) - mt_state_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state") - builder.call(rng_f, [mt_state_ptr, dice_ptr]) + rand_state_ptr = ctx.get_random_state_ptr(builder, self, state, params) + rng_f = ctx.get_uniform_dist_function_by_state(rand_state_ptr) + builder.call(rng_f, [rand_state_ptr, dice_ptr]) dice = builder.load(dice_ptr) sum_ptr = builder.alloca(ctx.float_ty) builder.store(ctx.float_ty(-0.0), sum_ptr) diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 8d82a3556fe..e04beea9d19 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -44,7 +44,7 @@ import numbers import types import warnings -from enum import IntEnum +from enum import IntFlag from math import e, pi, sqrt import numpy as np @@ -477,11 +477,15 @@ def derivative(self, input=None, output=None, context=None): return self._get_current_parameter_value(SLOPE, context) - def _is_identity(self, context=None): - return ( - self.parameters.slope._get(context) == 1 - and self.parameters.intercept._get(context) == 0 - ) + def _is_identity(self, context=None, defaults=False): + if 
defaults: + slope = self.defaults.slope + intercept = self.defaults.intercept + else: + slope = self.parameters.slope._get(context) + intercept = self.parameters.intercept._get(context) + + return slope == 1 and intercept == 0 # ********************************************************************************************************************** @@ -2207,7 +2211,7 @@ class Parameters(TransferFunction.Parameters): scale = Parameter(1.0, modulable=True) offset = Parameter(0.0, modulable=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) bounds = (None, None) @tc.typecheck @@ -2249,8 +2253,8 @@ def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags offset = pnlvm.helpers.load_extract_scalar_array_one(builder, offset_ptr) rvalp = builder.alloca(ptri.type.pointee) - rand_state_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state") - normal_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_normal") + rand_state_ptr = ctx.get_random_state_ptr(builder, self, state, params) + normal_f = ctx.get_normal_dist_function_by_state(rand_state_ptr) builder.call(normal_f, [rand_state_ptr, rvalp]) rval = builder.load(rvalp) @@ -3278,8 +3282,11 @@ def param_function(owner, function): receiver_len = len(owner.receiver.defaults.variable) return function(sender_len, receiver_len) - def _is_identity(self, context=None): - matrix = self.parameters.matrix._get(context) + def _is_identity(self, context=None, defaults=False): + if defaults: + matrix = self.defaults.matrix + else: + matrix = self.parameters.matrix._get(context) # if matrix is not an np array with at least one dimension, # this isn't an identity matrix @@ -3364,7 +3371,7 @@ def _is_identity(self, context=None): COMBINE_COSTS_FUNCTION] -class CostFunctions(IntEnum): +class CostFunctions(IntFlag): """Options for selecting constituent cost functions to be used by a `TransferWithCosts` Function. 
These can be used alone or in combination with one another, by enabling or disabling each using the @@ -3402,7 +3409,7 @@ class CostFunctions(IntEnum): ADJUSTMENT = 1 << 2 DURATION = 1 << 3 ALL = INTENSITY | ADJUSTMENT | DURATION - DEFAULTS = INTENSITY + DEFAULTS = NONE TRANSFER_FCT = 'transfer_fct' @@ -4088,9 +4095,18 @@ def _function(self, return intensity - def _is_identity(self, context=None): - return (self.parameters.transfer_fct.get()._is_identity(context) and - self.parameters.enabled_cost_functions.get(context) == CostFunctions.NONE) + def _is_identity(self, context=None, defaults=False): + transfer_fct = self.parameters.transfer_fct.get() + + if defaults: + enabled_cost_functions = self.defaults.enabled_cost_functions + else: + enabled_cost_functions = self.parameters.enabled_cost_functions.get(context) + + return ( + transfer_fct._is_identity(context, defaults=defaults) + and enabled_cost_functions == CostFunctions.NONE + ) @tc.typecheck def assign_costs(self, cost_functions: tc.any(CostFunctions, list), execution_context=None): diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 82899245901..80144c6897e 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -1213,6 +1213,7 @@ def _function(self, return self.convert_output_type(adjusted_value, variable) # MODIFIED 6/21/19 END + S_MINUS_L = 's-l' L_MINUS_S = 'l-s' OPERATIONS = {PRODUCT, SUM, S_MINUS_L, L_MINUS_S} @@ -2366,7 +2367,7 @@ class Parameters(IntegratorFunction.Parameters): time_step_size = Parameter(1.0, modulable=True) previous_time = Parameter(None, initializer='starting_point', pnl_internal=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) enable_output_type_conversion = Parameter( False, stateful=False, @@ -2484,9 +2485,9 @@ def _gen_llvm_integrate(self, builder, index, ctx, vi, vo, params, state): threshold = self._gen_llvm_load_param(ctx, builder, params, index, THRESHOLD) time_step_size = self._gen_llvm_load_param(ctx, builder, params, index, TIME_STEP_SIZE) - random_state = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state") + random_state = ctx.get_random_state_ptr(builder, self, state, params) rand_val_ptr = builder.alloca(ctx.float_ty) - rand_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_normal") + rand_f = ctx.get_normal_dist_function_by_state(random_state) builder.call(rand_f, [random_state, rand_val_ptr]) rand_val = builder.load(rand_val_ptr) @@ -2858,7 +2859,7 @@ class Parameters(IntegratorFunction.Parameters): initializer = Parameter([0], initalizer='variable', stateful=True) angle_function = Parameter(None, stateful=False, loggable=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) enable_output_type_conversion = Parameter( False, stateful=False, @@ -3390,7 +3391,7 @@ class Parameters(IntegratorFunction.Parameters): starting_point = 0.0 previous_time = Parameter(0.0, initializer='starting_point', pnl_internal=True) random_state = Parameter(None, 
loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) enable_output_type_conversion = Parameter( False, stateful=False, diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index baea3d3f2c3..910e49545ba 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -1094,7 +1094,7 @@ class Parameters(StatefulFunction.Parameters): noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) max_entries = Parameter(1000) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False) selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False) distance = Parameter(0, stateful=True, read_only=True) @@ -1419,7 +1419,7 @@ def _function(self, self.parameters.memory_field_shapes.set([item.shape for item in variable], context=context, override=True) # Retrieve entry from memory that best matches variable - if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.rand()): + if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()): entry = self.get_memory(variable, distance_field_weights, context).copy() else: # QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)? 
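The `random_state.rand()` calls in these memory functions are replaced with `random_state.uniform()`; a plausible motivation (an assumption, not stated in the diff) is that `uniform()` is available on both the legacy `numpy.random.RandomState` and the newer `numpy.random.Generator`, whereas `rand()` exists only on `RandomState`:

    import numpy as np

    rs = np.random.RandomState(0)    # legacy interface: has both rand() and uniform()
    gen = np.random.default_rng(0)   # Generator interface: has uniform(), but no rand()

    retrieval_prob = 0.9
    print(retrieval_prob > rs.uniform())    # works with RandomState
    print(retrieval_prob > gen.uniform())   # works with Generator as well
    # gen.rand() would raise AttributeError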
@@ -1428,7 +1428,7 @@ def _function(self, entry = self.uniform_entry(0, context) # Store variable in memory - if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.rand()): + if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.uniform()): self._store_memory(variable, context) return entry @@ -1601,12 +1601,12 @@ def format_for_storage(entry:np.ndarray) -> np.ndarray: matches = [m for m in existing_entries if len(m) and self._is_duplicate(entry, m, field_weights, context)] # If duplicate entries are not allowed and entry matches any existing entries, don't store - if matches and self.duplicate_entries_allowed == False: + if matches and self.duplicate_entries_allowed is False: storage_succeeded = False # If duplicate_entries_allowed is True or OVERWRITE, replace value for matching entry: # FIX: SHOULD BE OVERWRITE or False - elif matches and self.duplicate_entries_allowed is OVERWRITE: + elif matches and self.duplicate_entries_allowed == OVERWRITE: if len(matches)>1: # If there is already more than one duplicate, raise error as it is not clear what to overwrite raise FunctionError(f"Attempt to store item ({entry}) in {self.name} " @@ -2154,7 +2154,7 @@ class Parameters(StatefulFunction.Parameters): noise = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) max_entries = Parameter(1000) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False) selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False) @@ -2243,8 +2243,8 @@ def _get_state_initializer(self, context): def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): # PRNG - rand_struct = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state") - uniform_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_double") + rand_struct = ctx.get_random_state_ptr(builder, self, state, params) + uniform_f = ctx.get_uniform_dist_function_by_state(rand_struct) # Ring buffer buffer_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, "ring_memory") @@ -2575,7 +2575,7 @@ def _function(self, self.parameters.val_size._set(len(val), context) # Retrieve value from current dict with key that best matches key - if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.rand()): + if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()): memory = self.get_memory(key, context) else: # QUESTION: SHOULD IT RETURN 0's VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE & OutputPort FROM LAST TRIAL)? @@ -2592,7 +2592,7 @@ def _function(self, # TODO: does val need noise? 
key += noise[KEYS] - if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.rand()): + if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.uniform()): self._store_memory(variable, context) # Return 3d array with keys and vals as lists @@ -2711,7 +2711,7 @@ def _store_memory(self, memory:tc.any(list, np.ndarray), context): matches = [k for k in d[KEYS] if key==list(k)] # If dupliciate keys are not allowed and key matches any existing keys, don't store - if matches and self.duplicate_keys == False: + if matches and self.duplicate_keys is False: storage_succeeded = False # If dupliciate_keys is specified as OVERWRITE, replace value for matching key: diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index d1348f79bd8..bb8b6ecc5b3 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -14,12 +14,10 @@ from inspect import signature, _empty, getsourcelines import ast -from psyneulink.core.components.component import ComponentError from psyneulink.core.components.functions.function import FunctionError, Function_Base -from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import \ - ADDITIVE_PARAM, CONTEXT, CUSTOM_FUNCTION, EXECUTION_ID, MULTIPLICATIVE_PARAM, OWNER, PARAMS, \ - PARAMETER_PORT_PARAMS, SELF, USER_DEFINED_FUNCTION, USER_DEFINED_FUNCTION_TYPE + CONTEXT, CUSTOM_FUNCTION, OWNER, PARAMS, \ + SELF, USER_DEFINED_FUNCTION, USER_DEFINED_FUNCTION_TYPE from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences import is_pref_set from psyneulink.core.globals.utilities import iscompatible @@ -28,6 +26,31 @@ __all__ = ['UserDefinedFunction'] + +class _ExpressionVisitor(ast.NodeVisitor): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.vars = set() + self.functions = set() + + def visit_Name(self, node): + if node.id not in __builtins__: + self.vars.add(node.id) + + def visit_Call(self, node): + try: + # gives top level module name if module function used + func_id = node.func.value.id + except AttributeError: + func_id = node.func.id + + if func_id not in __builtins__: + self.functions.add(func_id) + + for c in ast.iter_child_nodes(node): + self.visit(c) + + class UserDefinedFunction(Function_Base): """UserDefinedFunction( \ custom_function=None, \ @@ -39,7 +62,8 @@ class UserDefinedFunction(Function_Base): .. _UDF_Description: - A UserDefinedFunction (UDF) is used to "wrap" a Python function or method, including a lamdba function, + A UserDefinedFunction (UDF) is used to "wrap" a Python function or method, lamdba function, + or an expression written in string format as a PsyNeuLink `Function `, so that it can be used as the `function ` of a `Component `. This is done automatically if a Python function or method is assigned as the `function ` attribute of a Component. A Python function or method can also be wrapped explicitly, @@ -49,13 +73,15 @@ class UserDefinedFunction(Function_Base): .. _UDF_Variable: - * It must have **at least one argument** (that can be a positional or a keyword argument); this will be treated + * If providing a Python function, method, or lambda function, it must have **at least one argument** (that can be a positional or a keyword argument); this will be treated as the `variable ` attribute of the UDF's `function `. 
When the UDF calls the function or method that it wraps, an initial attempt is made to do so with **variable** as the name of the first argument; if that fails, it is called positionally. The argument is always passed as a 2d np.array, that may contain one or more items (elements in axis 0), depending upon the Component to which the UDF is assigned. It is the user's responsibility to insure that the number of items expected in the first argument of the function or method is compatible with the circumstances in which it will be called. + If providing a string expression, **variable** is optional. However, if **variable** is not included in + the expression, the resulting UDF will not use **variable** at all in its calculation. .. .. _UDF_Additional_Arguments: @@ -166,6 +192,17 @@ class UserDefinedFunction(Function_Base): that be used to modify their values by `ControlSignals ` (see `example below <_ UDF_Control_Signal_Example>`). + .. _UDF_String_Expression_Function_Examples: + + The **function** argument may also be an expression written as a string:: + + >>> my_mech = pnl.ProcessingMechanism(function='sum(variable, 2)') + >>> my_mech.execute(input=[1]) + array([[3]]) + + This option is primarily designed for compatibility with other packages that use string expressions as + their main description of computation and may be less flexible or reliable than the previous styles. + .. _UDF_Explicit_Creation_Examples: In all of the examples above, a UDF was automatically created for the functions assigned to the Mechanism. A UDF @@ -419,6 +456,7 @@ def __init__(self, params=None, owner=None, prefs: tc.optional(is_pref_set) = None, + stateful_parameter=None, **kwargs): def get_cust_fct_args(custom_function): @@ -437,6 +475,26 @@ def get_cust_fct_args(custom_function): custom_function, self.__class__.__name__ ) ) + except TypeError: + v = _ExpressionVisitor() + v.visit(ast.parse(custom_function)) + parameters = v.vars.union(v.functions) + + if 'variable' in parameters: + parameters.remove('variable') + variable = kwargs['variable'] + else: + variable = None + + args = {} + for p in parameters: + if '.' not in p: # assume . 
indicates external module function call + try: + args[p] = kwargs[p] + except KeyError: + args[p] = None + + return variable, args, args args = {} defaults = {} @@ -507,6 +565,13 @@ def get_cust_fct_args(custom_function): context = self.cust_fct_params[CONTEXT] del self.cust_fct_params[CONTEXT] + if stateful_parameter is not None: + if stateful_parameter not in self.cust_fct_params: + raise FunctionError( + f'{stateful_parameter} specified as integration parameter is not a parameter of {custom_function}' + ) + self.stateful_parameter = stateful_parameter + # Assign variable to default_variable if default_variable was not specified if default_variable is None: default_variable = cust_fct_variable @@ -556,6 +621,8 @@ def _function(self, variable, context=None, **kwargs): # First check for value passed in params as runtime param: if PARAMS in kwargs and kwargs[PARAMS] is not None and param in kwargs[PARAMS]: self.cust_fct_params[param] = kwargs[PARAMS][param] + elif param in kwargs: + self.cust_fct_params[param] = kwargs[param] else: # Otherwise, get current value from ParameterPort (in case it is being modulated by ControlSignal(s) self.cust_fct_params[param] = self._get_current_parameter_value(param, context) @@ -577,9 +644,15 @@ def _function(self, variable, context=None, **kwargs): try: # Try calling with full list of args (including context and params) value = self.custom_function(variable, **kwargs) - except TypeError: - # Try calling with just variable and cust_fct_params - value = self.custom_function(variable, **call_params) + except TypeError as e: + if "'str' object is not callable" != str(e): + # Try calling with just variable and cust_fct_params + value = self.custom_function(variable, **call_params) + else: + value = eval(self.custom_function, kwargs) + + if self.stateful_parameter is not None and not self.is_initializing: + getattr(self.parameters, self.stateful_parameter)._set(value, context) return self.convert_output_type(value) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 4d443adde2a..b46b2b5a5dd 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -378,7 +378,6 @@ .. figure:: _static/Mechanism_Ports_fig.svg :alt: Mechanism Ports - :scale: 75 % :align: left **Schematic of a Mechanism showing its three types of Ports** (`InputPort`, `ParameterPort` and `OutputPort`). @@ -646,9 +645,15 @@ * **input_ports** / *INPUT_PORTS* - a list specifying the Mechanism's input_ports (see `InputPort_Specification` for details of specification). .. + * **input_labels** / *INPUT_LABEL_DICTS* - a dict specifying labels that can be used as inputs + (see `Mechanism_Labels_Dicts` for details of specification). + .. * **output_ports** / *OUTPUT_PORTS* - specifies specialized OutputPorts required by a Mechanism subclass (see `OutputPort_Specification` for details of specification). .. + * **output_labels** / *OUTPUT_LABEL_DICTS* - a dict specifying labels that can be for reporting outputs + (see `Mechanism_Labels_Dicts` for details of specification). + .. 
COMMENT: * **monitor_for_control** / *MONITOR_FOR_CONTROL* - specifies which of the Mechanism's OutputPorts is monitored by the `controller` for the Composition to which the Mechanism belongs (see `specifying monitored OutputPorts @@ -805,7 +810,7 @@ 'red' Labels may be used to visualize the input and outputs of Mechanisms in a Composition with the **show_structure** option -of the Composition's `show_graph ` method with the keyword **LABELS**. +of the Composition's `show_graph`show_graph ` method with the keyword **LABELS**. >>> C.show_graph(show_mechanism_structure=pnl.LABELS) #doctest: +SKIP @@ -1136,7 +1141,7 @@ class MechParamsDict(UserDict): def _input_port_variables_getter(owning_component=None, context=None): try: return [input_port.parameters.variable._get(context) for input_port in owning_component.input_ports] - except TypeError: + except (AttributeError, TypeError): return None @@ -1231,6 +1236,12 @@ class Mechanism_Base(Mechanism): the number and, if specified, their values must be compatible with any specifications made for **default_variable** or **size** (see `Mechanism_InputPorts` for additional details). + input_labels : dict + specifies labels (strings) that can be used to specify numeric values as input to the Mechanism; + entries must be either label:value pairs, or sub-dictionaries containing label:value pairs, + in which each label (key) specifies a string associated with a value for the corresponding InputPort(s) + of the Mechanism; see `Mechanism_Labels_Dicts` for additional details. + function : Function : default Linear specifies the function used to generate the Mechanism's `value `; can be a PsyNeuLink `Function` or a `UserDefinedFunction`; it `value ` is used to determine @@ -1241,6 +1252,12 @@ class Mechanism_Base(Mechanism): the `value ` of which is assigned the first item in the outermost dimension (axis 0) of the Mechanism's `value ` (see `Mechanism_OutputPorts` for additional details). + output_labels : dict + specifies labels (strings) that can be reported in place of numeric values as output(s) of the Mechanism; + entries must be either label:value pairs, or sub-dictionaries containing label:value pairs, + in which each label (key) specifies a string associated with a value for the OutputPort(s) of the + Mechanism; see `Mechanism_Labels_Dicts` for additional details. + Attributes ---------- @@ -1272,8 +1289,8 @@ class Mechanism_Base(Mechanism): input_labels_dict : dict contains entries that are either label:value pairs, or sub-dictionaries containing label:value pairs, - in which each label (key) specifies a string associated with a value for the InputPort(s) of the - Mechanism; see `Mechanism_Labels_Dicts` for additional details. + in which each label (key) specifies a string associated with a value for the corresponding InputPort(s) + of the Mechanism; see `Mechanism_Labels_Dicts` for additional details. input_labels : list[str] contains the labels corresponding to the value(s) of the InputPort(s) of the Mechanism. If the current value @@ -1380,20 +1397,20 @@ class Mechanism_Base(Mechanism): projections : ContentAddressableList a list of all of the Mechanism's `Projections `, composed from the - `path_afferents ` of all of its `input_ports `, + `path_afferents ` of all of its `input_ports `, the `mod_afferents` of all of its `input_ports `, `parameter_ports `, and `output_ports `, and the `efferents ` of all of its `output_ports `. 
afferents : ContentAddressableList a list of all of the Mechanism's afferent `Projections `, composed from the - `path_afferents ` of all of its `input_ports `, + `path_afferents ` of all of its `input_ports `, and the `mod_afferents` of all of its `input_ports `, `parameter_ports `, and `output_ports `., path_afferents : ContentAddressableList a list of all of the Mechanism's afferent `PathwayProjections `, composed from the - `path_afferents ` attributes of all of its `input_ports + `path_afferents ` attributes of all of its `input_ports `. mod_afferents : ContentAddressableList @@ -1651,8 +1668,10 @@ def __init__(self, default_variable=None, size=None, input_ports=None, + input_labels=None, function=None, output_ports=None, + output_labels=None, params=None, name=None, prefs=None, @@ -1717,6 +1736,8 @@ def __init__(self, name=name, input_ports=input_ports, output_ports=output_ports, + input_labels_dict=input_labels, + output_labels_dict=output_labels, **kwargs ) @@ -1783,11 +1804,9 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports default_variable = default_variable_from_input_ports else: raise MechanismError( - 'default variable determined from the specified input_ports spec ({0}) ' - 'is not compatible with the default variable determined from size parameter ({1})'. - format(default_variable_from_input_ports, size_variable, - ) - ) + f'Default variable for {self.name} determined from the specified input_ports spec ' + f'({default_variable_from_input_ports}) is not compatible with the default variable ' + f'determined from size parameter ({size_variable}).') else: # do not pass input_ports variable as default_variable, fall back to size specification pass @@ -1795,11 +1814,9 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports if input_ports_variable_was_specified: if not iscompatible(self._parse_arg_variable(default_variable), default_variable_from_input_ports): raise MechanismError( - 'Default variable determined from the specified input_ports spec ({0}) for {1} ' - 'is not compatible with its specified default variable ({2})'.format( - default_variable_from_input_ports, self.name, default_variable - ) - ) + f'Default variable for {self.name} determined from the specified input_ports spec ' + f'({default_variable_from_input_ports}) is not compatible with its specified ' + f'default variable ({default_variable}).') else: # do not pass input_ports variable as default_variable, fall back to default_variable specification pass @@ -1814,7 +1831,7 @@ def _handle_arg_input_ports(self, input_ports): Returns ------- A, B where - A is an defaults.variable-like object + A is a defaults.variable-like object B is True if **input_ports** contained an explicit variable specification, False otherwise """ @@ -1840,8 +1857,8 @@ def _handle_arg_input_ports(self, input_ports): default_variable_from_input_ports.append(InputPort.defaults.variable) continue else: - raise MechanismError("PROGRAM ERROR: Problem parsing {} specification ({}) for {}". 
- format(InputPort.__name__, s, self.name)) + raise MechanismError(f"PROGRAM ERROR: Problem parsing {InputPort.__name__} specification ({s}) " + f"for {self.name}.") mech_variable_item = None @@ -2409,7 +2426,7 @@ def execute(self, for i in range(len(item.shape))) for item in return_value))): - return return_value + return return_value else: converted_to_2d = convert_to_np_array(return_value, dimension=2) # If return_value is a list of heterogenous elements, return as is @@ -2503,7 +2520,7 @@ def execute(self, all(item.shape[i]==value[0].shape[0] for i in range(len(item.shape))) for item in value))): - pass + pass else: converted_to_2d = convert_to_np_array(value, dimension=2) # If return_value is a list of heterogenous elements, return as is @@ -2662,7 +2679,6 @@ def initialize(self, value, context=None): self.parameters.value.set(np.atleast_1d(value), context, override=True) self._update_output_ports(context=context) - def _parse_runtime_params(self, runtime_params, context): """Move Port param specifications and nested Project-specific specifications into sub-dicts. @@ -2915,24 +2931,26 @@ def _gen_llvm_param_ports_for_obj(self, obj, params_in, ctx, builder, # Few extra copies will be eliminated by the compiler. builder.store(builder.load(params_in), params_out) - # Filter out param ports without corresponding params for this function - param_ports = [p for p in self._parameter_ports if p.name in obj.llvm_param_ids] + # This should be faster than 'obj._get_compilation_params' + compilation_params = (getattr(obj.parameters, p_id, None) for p_id in obj.llvm_param_ids) + # Filter out param ports without corresponding param for this function + param_ports = [self._parameter_ports[param] for param in compilation_params if param in self._parameter_ports] def _get_output_ptr(b, i): ptr = pnlvm.helpers.get_param_ptr(b, obj, params_out, - param_ports[i].name) + param_ports[i].source.name) return b, ptr def _fill_input(b, p_input, i): - param_in_ptr = pnlvm.helpers.get_param_ptr(b, obj, params_in, - param_ports[i].name) + param_ptr = pnlvm.helpers.get_param_ptr(b, obj, params_in, + param_ports[i].source.name) # Parameter port inputs are {original parameter, [modulations]}, - # fill in the first one. + # here we fill in the first one. 
data_ptr = builder.gep(p_input, [ctx.int32_ty(0), ctx.int32_ty(0)]) - assert data_ptr.type == param_in_ptr.type, \ + assert data_ptr.type == param_ptr.type, \ "Mishandled modulation type for: {} in '{}' in '{}'".format( param_ports[i].name, obj.name, self.name) - b.store(b.load(param_in_ptr), data_ptr) + b.store(b.load(param_ptr), data_ptr) return b builder = self._gen_llvm_ports(ctx, builder, param_ports, "_parameter_ports", @@ -2942,20 +2960,20 @@ def _fill_input(b, p_input, i): def _gen_llvm_output_port_parse_variable(self, ctx, builder, mech_params, mech_state, value, port): - port_spec = port._variable_spec - if port_spec == OWNER_VALUE: - return value - elif isinstance(port_spec, tuple) and port_spec[0] == OWNER_VALUE: - index = port_spec[1]() if callable(port_spec[1]) else port_spec[1] - - assert index < len(value.type.pointee) - return builder.gep(value, [ctx.int32_ty(0), ctx.int32_ty(index)]) - elif port_spec == OWNER_EXECUTION_COUNT: - execution_count = pnlvm.helpers.get_state_ptr(builder, self, mech_state, "execution_count") - return execution_count - else: - #TODO: support more spec options - assert False, "Unsupported OutputPort spec: {} ({})".format(port_spec, value.type) + port_spec = port._variable_spec + if port_spec == OWNER_VALUE: + return value + elif isinstance(port_spec, tuple) and port_spec[0] == OWNER_VALUE: + index = port_spec[1]() if callable(port_spec[1]) else port_spec[1] + + assert index < len(value.type.pointee) + return builder.gep(value, [ctx.int32_ty(0), ctx.int32_ty(index)]) + elif port_spec == OWNER_EXECUTION_COUNT: + execution_count = pnlvm.helpers.get_state_ptr(builder, self, mech_state, "execution_count") + return execution_count + else: + #TODO: support more spec options + assert False, "Unsupported OutputPort spec: {} ({})".format(port_spec, value.type) def _gen_llvm_output_ports(self, ctx, builder, value, mech_params, mech_state, mech_in, mech_out): @@ -3465,19 +3483,6 @@ def port_cell(port, include_function:bool=False, include_value:bool=False, use_l elif output_fmt == 'jupyter': return m - @tc.typecheck - def _get_port_name(self, port:Port): - if isinstance(port, InputPort): - port_type = InputPort.__name__ - elif isinstance(port, ParameterPort): - port_type = ParameterPort.__name__ - elif isinstance(port, OutputPort): - port_type = OutputPort.__name__ - else: - assert False, f'Mechanism._get_port_name() must be called with an ' \ - f'{InputPort.__name__}, {ParameterPort.__name__} or {OutputPort.__name__}' - return port_type + '-' + port.name - def plot(self, x_range=None): """Generate a plot of the Mechanism's `function ` using the specified parameter values (see `DDM.plot ` for details of the animated DDM plot). 
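`_gen_llvm_output_port_parse_variable` (re-indented above) dispatches on an OutputPort's `_variable_spec` to decide which part of the owning Mechanism's value the port receives. A plain-Python analogue of that dispatch, with hypothetical names and no LLVM code generation:

    OWNER_VALUE = 'OWNER_VALUE'
    EXECUTION_COUNT = 'EXECUTION_COUNT'

    def parse_output_port_variable(port_spec, owner_value, execution_count):
        if port_spec == OWNER_VALUE:                     # whole value of the owner Mechanism
            return owner_value
        if isinstance(port_spec, tuple) and port_spec[0] == OWNER_VALUE:
            index = port_spec[1]() if callable(port_spec[1]) else port_spec[1]
            return owner_value[index]                    # a single item of the owner's value
        if port_spec == EXECUTION_COUNT:                 # number of times the owner has executed
            return execution_count
        raise ValueError(f"Unsupported OutputPort spec: {port_spec}")

    print(parse_output_port_variable((OWNER_VALUE, 1), [[0.0], [1.5]], 3))   # -> [1.5]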
@@ -3515,6 +3520,22 @@ def plot(self, x_range=None): plt.plot(x_space, self.function(x_space)[0], lw=3.0, c='r') plt.show() + # def remove_projection(self, projection): + # pass + + @tc.typecheck + def _get_port_name(self, port:Port): + if isinstance(port, InputPort): + port_type = InputPort.__name__ + elif isinstance(port, ParameterPort): + port_type = ParameterPort.__name__ + elif isinstance(port, OutputPort): + port_type = OutputPort.__name__ + else: + assert False, f'Mechanism._get_port_name() must be called with an ' \ + f'{InputPort.__name__}, {ParameterPort.__name__} or {OutputPort.__name__}' + return port_type + '-' + port.name + @tc.typecheck @handle_external_context() def add_ports(self, ports, update_variable=True, context=None): @@ -3595,7 +3616,7 @@ def add_ports(self, ports, update_variable=True, context=None): context=context) for port in instantiated_input_ports: if port.name is port.componentName or port.componentName + '-' in port.name: - port._assign_default_port_Name() + port._assign_default_port_Name() # self._instantiate_function(function=self.function) if output_ports: instantiated_output_ports = _instantiate_output_ports(self, output_ports, context=context) @@ -3882,8 +3903,11 @@ def output_labels(self): def get_output_labels(self, context=None): if self.output_labels_dict: return self._get_port_value_labels(OutputPort, context) - else: + elif context: return self.get_output_values(context) + else: + return self.output_values + @property def ports(self): diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index e4fdf5817c1..d3c8b25f947 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -21,7 +21,7 @@ * `ControlMechanism_Structure` - `ControlMechanism_Input` - `ControlMechanism_Function` - - 'ControlMechanism_Output` + - `ControlMechanism_Output` - `ControlMechanism_Costs_NetOutcome` * `ControlMechanism_Execution` * `ControlMechanism_Examples` @@ -72,7 +72,7 @@ ControlMechanism can be assigned as the `controller ` for a Composition by specifying it in the **controller** argument of the Composition's constructor, or by using the Composition's `add_controller ` method. A Composition's `controller ` and its associated -Components can be displayed using the Composition's `show_graph ` method with its +Components can be displayed using the Composition's `show_graph ` method with its **show_control** argument assigned as `True`. @@ -144,8 +144,8 @@ * **monitor_for_control** -- a list of `OutputPort specifications `. If the **objective_mechanism** argument is not specified (or is *False* or *None*) then, when the ControlMechanism is - added to a `Composition`, a `MappingProjection` is created for each OutputPort specified to the ControlMechanism's - *OUTCOME* `input_port `. If the **objective_mechanism** `argument + added to a `Composition`, a `MappingProjection` is created from each OutputPort specified to InputPorts + created on the ControlMechanism (see `ControlMechanism_Input` for details). If the **objective_mechanism** `argument ` is specified, then the OutputPorts specified in **monitor_for_control** are assigned to the `ObjectiveMechanism` rather than the ControlMechanism itself (see `ControlMechanism_ObjectiveMechanism` for details). 
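The revised **monitor_for_control** documentation above means that, without an `objective_mechanism`, monitored items project directly to InputPorts created on the ControlMechanism. A hedged usage sketch (assuming the `outcome_input_ports_option` argument and `SEPARATE` keyword introduced in this diff are exported at the package level, and omitting control signals for brevity):

    import psyneulink as pnl

    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B')

    # With no objective_mechanism, each monitored item gets its own InputPort (SEPARATE);
    # COMBINE or CONCATENATE would route both Projections to a single OUTCOME InputPort.
    ctl = pnl.ControlMechanism(monitor_for_control=[A, B],
                               outcome_input_ports_option=pnl.SEPARATE)

    comp = pnl.Composition(pathways=[[A, B]])
    comp.add_controller(ctl)   # the MappingProjections to ctl are created here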
@@ -189,6 +189,18 @@ specify an ObjectiveMechanism with a custom `function ` and weighting of the OutputPorts monitored (see `below ` for additional details). + .. _ControlMechanism_Allow_Probes: + + * **allow_probes** -- this argument allows values of Components of a `nested Composition ` other + than its `OUTPUT ` `Nodes ` to be specified in the **monitor_for_control** + argument of the ControlMechanism's constructor or, if **objective_mechanism** is specified, in the + **monitor** argument of the ObjectiveMechanism's constructor (see above). If the Composition's `allow_probes + ` attribute is False, it is set to *CONTROL*, and only a ControlMechanism can receive + projections from `PROBE ` Nodes of a nested Composition (the current one as well as any others in + the same Composition); if the Composition's `allow_probes ` attribute is True, then it + is left that way, and any node within the Composition, including the ControlMechanism, can receive projections from + `PROBE ` Nodes (see `Probes ` for additional details). + The OutputPorts monitored by a ControlMechanism or its `objective_mechanism ` are listed in the ControlMechanism's `monitor_for_control ` attribute (and are the same as those listed in the `monitor ` attribute of the @@ -342,17 +354,21 @@ *Input* ~~~~~~~ -By default, a ControlMechanism has a single (`primary `) `input_port -` that is named *OUTCOME*. If the ControlMechanism has an `objective_mechanism -`, then the *OUTCOME* `input_port ` receives a -single `MappingProjection` from the `objective_mechanism `\\'s *OUTCOME* -OutputPort (see `ControlMechanism_ObjectiveMechanism` for additional details). Otherwise, when the ControlMechanism is -added to a `Composition`, MappingProjections are created that project to the ControlMechanism's *OUTCOME* `input_port -` from each of the OutputPorts specified in the **monitor_for_control** `argument -` of its constructor. The `value ` of the -ControlMechanism's *OUTCOME* InputPort is assigned to its `outcome ` attribute), -and is used as the input to the ControlMechanism's `function ` to determine its -`control_allocation `. +By default, a ControlMechanism has a single `input_port ` named *OUTCOME*. If it has an +`objective_mechanism `, then the *OUTCOME* `input_port +` receives a single `MappingProjection` from the `objective_mechanism +`\\'s *OUTCOME* `OutputPort` (see `ControlMechanism_ObjectiveMechanism` for +additional details). If the ControlMechanism has no `objective_mechanism ` then, +when it is added to a `Composition`, MappingProjections are created from the items specified in `monitor_for_control +` directly to InputPorts on the ControlMechanism (see +`ControlMechanism_Monitor_for_Control` for additional details). The number of InputPorts created, and how the items +listed in `monitor_for_control ` project to them is determined by the +ControlMechanism's `outcome_input_ports_option `. All of the InputPorts +that receive Projections from those items, or the `objective_mechanism ` if +the ControlMechanism has one, are listed in its `outcome_input_ports ` attribute, +and their values in the `outcome ` attribute. The latter is used as the input to the +ControlMechanism's `function ` to determine its `control_allocation +`. ..
_ControlMechanism_Function: @@ -410,7 +426,7 @@ that can be used to compute the `combined costs ` of its `control_signals `, a `reconfiguration_cost ` based on their change in value, and a `net_outcome ` (the `value ` of the ControlMechanism's -*OUTCOME* `input_port ` minus its `combined costs `), +*OUTCOME* `InputPort ` minus its `combined costs `), respectively (see `ControlMechanism_Costs_Computation` below for additional details). These methods are used by some subclasses of ControlMechanism (e.g., `OptimizationControlMechanism`) to compute their `control_allocation `. Each method is assigned a default function, but can be assigned a custom @@ -439,6 +455,7 @@ A ControlMechanism is executed using the same sequence of actions as any `Mechanism `, with the following additions. +# FIX: 11/3/21: MODIFY TO INCLUDE POSSIBLITY OF MULTIPLE OUTCOME_INPUT_PORTS The ControlMechanism's `function ` takes as its input the `value ` of its *OUTCOME* `input_port ` (also contained in `outcome `). It uses that to determine the `control_allocation `, which specifies the value @@ -560,31 +577,34 @@ """ -import copy import collections +import copy import itertools -import numpy as np import threading -import typecheck as tc import uuid import warnings +import numpy as np +import typecheck as tc + from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import Function_Base, is_function_type +from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination -from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.mechanisms.mechanism import Mechanism, Mechanism_Base -from psyneulink.core.components.ports.port import Port, _parse_port_spec -from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal +from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.ports.inputport import InputPort +from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.components.ports.parameterport import ParameterPort +from psyneulink.core.components.ports.port import Port, _parse_port_spec from psyneulink.core.globals.defaults import defaultControlAllocation from psyneulink.core.globals.keywords import \ - AUTO_ASSIGN_MATRIX, CONTROL, CONTROL_PROJECTION, CONTROL_SIGNAL, CONTROL_SIGNALS, \ - EID_SIMULATION, GATING_SIGNAL, INIT_EXECUTE_METHOD_ONLY, NAME, \ + AUTO_ASSIGN_MATRIX, COMBINE, CONTROL, CONTROL_PROJECTION, CONTROL_SIGNAL, CONTROL_SIGNALS, CONCATENATE, \ + EID_SIMULATION, FUNCTION, GATING_SIGNAL, INIT_EXECUTE_METHOD_ONLY, INTERNAL_ONLY, NAME, \ MECHANISM, MULTIPLICATIVE, MODULATORY_SIGNALS, MONITOR_FOR_CONTROL, MONITOR_FOR_MODULATION, \ - OBJECTIVE_MECHANISM, OUTCOME, OWNER_VALUE, PRODUCT, PROJECTION_TYPE, PROJECTIONS, PORT_TYPE + OBJECTIVE_MECHANISM, OUTCOME, OWNER_VALUE, PARAMS, PORT_TYPE, PRODUCT, PROJECTION_TYPE, PROJECTIONS, \ + SEPARATE, SIZE from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -621,9 +641,11 @@ def _is_control_spec(spec): else: return False + class 
ControlMechanismError(Exception): - def __init__(self, error_value): + def __init__(self, error_value, data=None): self.error_value = error_value + self.data = data def validate_monitored_port_spec(owner, spec_list): @@ -656,7 +678,7 @@ def validate_monitored_port_spec(owner, spec_list): if isinstance(spec, type) and issubclass(spec, Mechanism): raise ControlMechanismError( f"Mechanism class ({spec.__name__}) specified in '{MONITOR_FOR_CONTROL}' arg " - f"of {self.name}; it must be an instantiated {Mechanism.__name__} or " + f"of {owner.name}; it must be an instantiated {Mechanism.__name__} or " f"{OutputPort.__name__} of one." ) elif isinstance(spec, Port): @@ -671,7 +693,6 @@ def validate_monitored_port_spec(owner, spec_list): f"it must be an {OutputPort.__name__} or a {Mechanism.__name__}." ) - def _control_mechanism_costs_getter(owning_component=None, context=None): # NOTE: In cases where there is a reconfiguration_cost, that cost is not returned by this method try: @@ -688,9 +709,10 @@ def _control_mechanism_costs_getter(owning_component=None, context=None): return None def _outcome_getter(owning_component=None, context=None): + """Return array of values of outcome_input_ports""" try: - return owning_component.parameters.variable._get(context)[0] - except TypeError: + return np.array([port.parameters.value._get(context) for port in owning_component.outcome_input_ports]) + except (AttributeError, TypeError): return None def _net_outcome_getter(owning_component=None, context=None): @@ -704,6 +726,7 @@ def _net_outcome_getter(owning_component=None, context=None): except TypeError: return [0] + class DefaultAllocationFunction(Function_Base): """Take a single 1d item and return a 2d array with n identical items Takes the default input (a single value in the *OUTCOME* InputPort of the ControlMechanism), @@ -758,15 +781,17 @@ def _gen_llvm_function_body(self, ctx, builder, _1, _2, arg_in, arg_out, *, tags class ControlMechanism(ModulatoryMechanism_Base): """ - ControlMechanism( \ - monitor_for_control=None, \ - objective_mechanism=None, \ - function=Linear, \ - default_allocation=None, \ - control=None, \ - modulation=MULTIPLICATIVE, \ - combine_costs=np.sum, \ - compute_reconfiguration_cost=None, \ + ControlMechanism( \ + monitor_for_control=None, \ + objective_mechanism=None, \ + allow_probes=False, \ + outcome_input_ports_option=SEPARATE \ + function=Linear, \ + default_allocation=None, \ + control=None, \ + modulation=MULTIPLICATIVE, \ + combine_costs=np.sum, \ + compute_reconfiguration_cost=None, \ compute_net_outcome=lambda x,y:x-y) Subclass of `ModulatoryMechanism ` that modulates the parameter(s) of one or more @@ -817,6 +842,19 @@ class ControlMechanism(ModulatoryMechanism_Base): OutputPorts specified in the ControlMechanism's **monitor_for_control** `argument `. + allow_probes : bool : default False + specifies whether Components of a `nested Composition ` that are not OUTPUT + ` `Nodes ` of that Composition can be specified as items in the + ControlMechanism's **monitor_for_control** argument or, if **objective_mechanism** + is specified, in the ObjectiveMechanism's **monitor** argument (see `allow_probes + ` for additional information). 
+ + outcome_input_ports_option : COMBINE, CONCATENATE, SEPARATE : default SEPARATE + if **objective_mechanism** is not specified, this specifies whether `MappingProjections ` + from items specified in **monitor_for_control** are each assigned their own `InputPort` (*SEPARATE*) + or to a single *OUTCOME* InputPort (*CONCATENATE*, *COMBINE*); (see `outcome_input_ports_option + ` for additional details). + function : TransferFunction : default Linear(slope=1, intercept=0) specifies function used to combine values of monitored OutputPorts. @@ -851,15 +889,39 @@ class ControlMechanism(ModulatoryMechanism_Base): Attributes ---------- + monitor_for_control : List[OutputPort] + each item is an `OutputPort` monitored by the ControlMechanism or its `objective_mechanism + ` if that is specified (see `ControlMechanism_Monitor_for_Control`); + in the latter case, the list returned is ObjectiveMechanism's `monitor ` attribute. + objective_mechanism : ObjectiveMechanism `ObjectiveMechanism` that monitors and evaluates the values specified in the ControlMechanism's **objective_mechanism** argument, and transmits the result to the ControlMechanism's *OUTCOME* `input_port `. - monitor_for_control : List[OutputPort] - each item is an `OutputPort` monitored by the ControlMechanism or its `objective_mechanism - ` if that is specified (see `ControlMechanism_Monitor_for_Control`); - in the latter case, the list returned is ObjectiveMechanism's `monitor ` attribute. + allow_probes : bool + indicates status of the `allow_probes ` attribute of the Composition + to which the ControlMechanism belongs. If False, items specified in the `monitor_for_control + ` are all `OUTPUT ` `Nodes ` + of that Composition. If True, they may be `INPUT ` or `INTERNAL ` + `Nodes ` of `nested Composition ` (see `allow probes + ` and `Composition_Probes` for additional information). + + outcome_input_ports_option : SEPARATE, COMBINE, or CONCATENATE + determines how items specified in `monitor_for_control ` project to + the ControlMechanism if no `objective_mechanism ` is specified. If + *SEPARATE* is specified (the default), the `Projection` from each item specified in `monitor_for_control + ` is assigned its own `InputPort`. All of the InputPorts are assigned + to a list in the ControlMechanism's `outcome_input_ports ` attribute. + If *CONCATENATE* or *COMBINE* is specified, all of the projections are assigned to a single InputPort, named + *OUTCOME*. If *COMBINE* is specified, the *OUTCOME* InputPort is assigned `LinearCombination` as its + `function `, which sums the `values ` of the projections to it (all of + which must have the same dimension), to produce a single array (this is the default behavior for multiple + Projections to a single InputPort; see InputPort `function `). If *CONCATENATE* is + specified, the *OUTCOME* InputPort is assigned `Concatenate` as its `function `, which + concatenates the `values ` of its Projections into a single array of length equal to the sum + of their lengths (which need not be the same). In both cases, the *OUTCOME* InputPort is assigned as the only + item in the list of `outcome_input_ports `. monitored_output_ports_weights_and_exponents : List[Tuple(float, float)] each tuple in the list contains the weight and exponent associated with a corresponding OutputPort specified @@ -870,15 +932,25 @@ class ControlMechanism(ModulatoryMechanism_Base): contribution made to its output by each of the values that it monitors (see `ObjectiveMechanism Function `).
+ COMMENT: + # FIX 11/3/21 DELETED SINCE IT CAN NOW HAVE MANY input_port : InputPort the ControlMechanism's `primary InputPort `, named *OUTCOME*; this receives a `MappingProjection` from the *OUTCOME* `OutputPort ` of `objective_mechanism ` if that is specified; otherwise, it receives MappingProjections - from each of the OutputPorts specifed in `monitor_for_control ` + from each of the OutputPorts specified in `monitor_for_control ` (see `ControlMechanism_Input` for additional details). + COMMENT + + outcome_input_ports : ContentAddressableList + list of the ControlMechanism's `InputPorts ` that receive `Projections ` from + either is `objective_mechanism ` (in which case the list contains + only the ControlMechanism's *OUTCOME* `InputPort `), or the `OutputPorts ` + of the items listed in its `monitor_for_control ` attribute. outcome : 1d array - the `value ` of the ControlMechanism's *OUTCOME* `input_port `. + an array containing the `value ` of each of the ControlMechanism's `outcome_input_ports + `. function : TransferFunction : default Linear(slope=1, intercept=0) determines how the `value `\\s of the `OutputPorts ` specified in the @@ -1027,7 +1099,7 @@ class Parameters(ModulatoryMechanism_Base.Parameters): :type: input_ports - see `input_ports ` + see `input_ports ` :default value: [`OUTCOME`] :type: ``list`` @@ -1046,6 +1118,13 @@ class Parameters(ModulatoryMechanism_Base.Parameters): :type: ``list`` :read only: True + outcome_input_ports_option + see `outcome_input_ports_option ` + + :default value: SEPARATE + :type: ``str`` + :read only: True + net_outcome see `net_outcome ` @@ -1067,7 +1146,7 @@ class Parameters(ModulatoryMechanism_Base.Parameters): :read only: True output_ports - see `output_ports ` + see `output_ports ` :default value: None :type: @@ -1102,6 +1181,7 @@ class Parameters(ModulatoryMechanism_Base.Parameters): modulation = Parameter(MULTIPLICATIVE, pnl_internal=True) objective_mechanism = Parameter(None, stateful=False, loggable=False, structural=True) + outcome_input_ports_option = Parameter(SEPARATE, stateful=False, loggable=False, structural=True) input_ports = Parameter( [OUTCOME], @@ -1183,6 +1263,8 @@ def __init__(self, size=None, monitor_for_control:tc.optional(tc.any(is_iterable, Mechanism, OutputPort))=None, objective_mechanism=None, + allow_probes:bool = False, + outcome_input_ports_option:tc.optional(tc.enum(CONCATENATE, COMBINE, SEPARATE))=None, function=None, default_allocation:tc.optional(tc.any(int, float, list, np.ndarray))=None, control:tc.optional(tc.any(is_iterable, @@ -1202,6 +1284,7 @@ def __init__(self, monitor_for_control = convert_to_list(monitor_for_control) or [] control = convert_to_list(control) or [] + self.allow_probes = allow_probes # For backward compatibility: if kwargs: @@ -1209,14 +1292,35 @@ def __init__(self, args = kwargs.pop(MONITOR_FOR_MODULATION) if args: monitor_for_control.extend(convert_to_list(args)) + + # Only allow one of CONTROL, MODULATORY_SIGNALS OR CONTROL_SIGNALS to be specified + # These are synonyms, but allowing several to be specified and trying to combine the specifications + # can cause problems if different forms of specification are used to refer to the same Component(s) + control_specified = "'control'" if control else '' + modulatory_signals_specified = '' if MODULATORY_SIGNALS in kwargs: args = kwargs.pop(MODULATORY_SIGNALS) if args: - control.extend(convert_to_list(args)) + if control: + modulatory_signals_specified = f"'{MODULATORY_SIGNALS}'" + raise ControlMechanismError(f"Both {control_specified} 
and {modulatory_signals_specified} " + f"arguments have been specified for {self.name}. " + f"These are synonyms, but only one should be used to avoid " + f"creating unnecessary and/or duplicated Components.") + control = convert_to_list(args) if CONTROL_SIGNALS in kwargs: args = kwargs.pop(CONTROL_SIGNALS) if args: - control.extend(convert_to_list(args)) + if control: + if control_specified and modulatory_signals_specified: + prev_spec = ", ".join([control_specified, modulatory_signals_specified]) + else: + prev_spec = control_specified or modulatory_signals_specified + raise ControlMechanismError(f"Both {prev_spec} and '{CONTROL_SIGNALS}' arguments " + f"have been specified for {self.name}. " + f"These are synonyms, but only one should be used to avoid " + f"creating unnecessary and/or duplicated Components.") + control = convert_to_list(args) function = function or DefaultAllocationFunction @@ -1230,6 +1334,7 @@ def __init__(self, name=name, function=function, monitor_for_control=monitor_for_control, + outcome_input_ports_option=outcome_input_ports_option, control=control, output_ports=control, objective_mechanism=objective_mechanism, @@ -1298,7 +1403,7 @@ def _validate_params(self, request_set, target_set=None, context=None): f"({ctl_spec})") # IMPLEMENTATION NOTE: THIS SHOULD BE MOVED TO COMPOSITION ONCE THAT IS IMPLEMENTED - def _instantiate_objective_mechanism(self, context=None): + def _instantiate_objective_mechanism(self, input_ports=None, context=None): """ # FIX: ??THIS SHOULD BE IN OR MOVED TO ObjectiveMechanism Assign InputPort to ObjectiveMechanism for each OutputPort to be monitored; @@ -1324,6 +1429,12 @@ def _instantiate_objective_mechanism(self, context=None): # GET OutputPorts to Monitor (to specify as or add to ObjectiveMechanism's monitored_output_ports attribute + # FIX: 11/3/21: put OUTCOME InputPort at the end rather than the beginning + # THEN SEE ALL INSTANCES IN COMMENTS OF: "NEED TO MODIFY ONCE OUTCOME InputPorts ARE MOVED" + # Other input_ports are those passed into this method, that are presumed to be for other purposes + # (e.g., used by OptimizationControlMechanism for representing state_features as inputs) + # those are appended after the OUTCOME InputPort # FIX <- change to prepend when refactored + other_input_ports = input_ports or [] monitored_output_ports = [] monitor_for_control = self.monitor_for_control or [] @@ -1345,7 +1456,6 @@ def _instantiate_objective_mechanism(self, context=None): monitored_output_ports.extend([item]) # INSTANTIATE ObjectiveMechanism - # If *objective_mechanism* argument is an ObjectiveMechanism, add monitored_output_ports to it if isinstance(self.objective_mechanism, ObjectiveMechanism): if monitored_output_ports: @@ -1360,6 +1470,8 @@ def _instantiate_objective_mechanism(self, context=None): except (ObjectiveMechanismError, FunctionError) as e: raise ObjectiveMechanismError(f"Error creating {OBJECTIVE_MECHANISM} for {self.name}: {e}") + self.objective_mechanism.control_mechanism = self + # Print monitored_output_ports if self.prefs.verbosePref: print("{0} monitoring:".format(self.name)) @@ -1370,9 +1482,29 @@ def _instantiate_objective_mechanism(self, context=None): self.monitored_output_ports.index(port)][EXPONENT_INDEX] print(f"\t{weight} (exp: {weight}; wt: {exponent})") - # Instantiate MappingProjection from ObjectiveMechanism to ControlMechanism + + # INSTANTIATE OUTCOME InputPort on ControlMechanism that receives projection from ObjectiveMechanism + + # Get size of ObjectiveMechanism's OUTCOME OutputPort, and then 
append sizes of other any InputPorts passed in + outcome_input_port_size = self.objective_mechanism.output_ports[OUTCOME].value.size + outcome_input_port = {SIZE:outcome_input_port_size, + NAME:OUTCOME, + PARAMS:{INTERNAL_ONLY:True}} + other_input_port_value_sizes, _ = self._handle_arg_input_ports(other_input_ports) + input_port_value_sizes = [outcome_input_port_size] + other_input_port_value_sizes + input_ports = [outcome_input_port] + other_input_ports + super()._instantiate_input_ports(context=context, + input_ports=input_ports, + reference_value=input_port_value_sizes) + + # Assign OUTCOME InputPort to ControlMechanism's list of outcome_input_ports (in this case, it is the only one) + self.outcome_input_ports.append(self.input_ports[OUTCOME]) + + # FIX: 11/3/21: ISN'T THIS DONE IN super()_instantiate_input_ports BASED ON OUTCOME InputPort specification? + # (or shouldn't it be?) PRESUMABLY THE ONES FOR other_input_ports ARE + # INSTANTIATE MappingProjection from ObjectiveMechanism to ControlMechanism projection_from_objective = MappingProjection(sender=self.objective_mechanism, - receiver=self, + receiver=self.input_ports[OUTCOME], matrix=AUTO_ASSIGN_MATRIX, context=context) @@ -1392,22 +1524,150 @@ def _instantiate_objective_mechanism(self, context=None): self._objective_projection = projection_from_objective self.parameters.monitor_for_control._set(self.monitored_output_ports, context) - def _instantiate_input_ports(self, context=None): + def _instantiate_input_ports(self, input_ports=None, context=None): + """Instantiate input_ports for items being monitored and evaluated, and ObjectiveMechanism if specified + + If **objective_mechanism** is specified: + - instantiate ObjectiveMechanism, which also instantiates an OUTCOME InputPort + and a MappingProjection to it from the ObjectiveMechanisms OUTCOME OutputPort + + If **monitor_for_control** is specified: + - it is used to construct an InputPort from each sender specified in it, + and a corresponding MappingProjection from the sender to that InputPort; + - each InputPort is named using an uppercase version of the sender's name + + If nothing is specified, a default OUTCOME InputPort is instantiated with no projections to it + """ + + other_input_ports = input_ports or [] - super()._instantiate_input_ports(context=context) - self.input_port.name = OUTCOME - self.input_port.name = OUTCOME + # FIX 11/3/21: THIS SHOULD BE MADE A PARAMETER + self.outcome_input_ports = ContentAddressableList(component_type=OutputPort) - # If objective_mechanism is specified, instantiate it, - # including Projections to it from monitor_for_control + # If ObjectiveMechanism is specified, instantiate it and OUTCOME InputPort that receives projection from it if self.objective_mechanism: - self._instantiate_objective_mechanism(context=context) + # This instantiates an OUTCOME InputPort sized to match the ObjectiveMechanism's OUTCOME OutputPort + # Note: in this case, any items specified in monitor_for_control are passed to the **monitor** argument + # of the objective_mechanism's constructor + self._instantiate_objective_mechanism(input_ports, context=context) - # Otherwise, instantiate Projections from monitor_for_control to ControlMechanism + # If no ObjectiveMechanism is specified, but items to monitor are specified, + # assign an outcome_input_port for each item specified elif self.monitor_for_control: + + # Get outcome_input_port_specs without including specifications of Projections to them, as those need to + # be constructed and specified as 
aux_components (below) for validation and activation by Composition + outcome_input_port_specs, outcome_value_sizes, projection_specs \ + = self._parse_monitor_for_control_input_ports(context) + + # Get sizes of input_ports passed in (that are presumably used for other purposes; + # e.g., ones used by OptimizationControlMechanism for simulated inputs or state_features) + other_input_port_value_sizes = self._handle_arg_input_ports(other_input_ports)[0] + + # Construct full list of InputPort specifications and sizes + input_ports = outcome_input_port_specs + other_input_ports + input_port_value_sizes = outcome_value_sizes + other_input_port_value_sizes + super()._instantiate_input_ports(context=context, + input_ports=input_ports, + reference_value=input_port_value_sizes) + # FIX: 11/3/21 NEED TO MODIFY ONCE OUTCOME InputPorts ARE MOVED + self.outcome_input_ports.extend(self.input_ports[:len(outcome_input_port_specs)]) + + # Instantiate Projections to outcome_input_ports from items specified in monitor_for_control + # (list of which were placed in self.aux_components by _parse_monitor_for_control_input_ports) + option = self.outcome_input_ports_option from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection - for sender in convert_to_list(self.monitor_for_control): - self.aux_components.append(MappingProjection(sender=sender, receiver=self.input_ports[OUTCOME])) + from psyneulink.core.components.mechanisms.processing.objectivemechanism import _parse_monitor_specs + + self.aux_components = [] + for i in range(len(projection_specs)): + if option == SEPARATE: + # Each outcome_input_port get its own Projection + outcome_port_index = i + else: + # The single outcome_input_port gets all the Projections + outcome_port_index = 0 + self.aux_components.append(MappingProjection(sender=projection_specs[i], + receiver=self.outcome_input_ports[outcome_port_index])) + + # Nothing has been specified, so just instantiate the default OUTCOME InputPort + else: + super()._instantiate_input_ports(context=context) + self.outcome_input_ports.append(self.input_ports[OUTCOME]) + + def _parse_monitor_for_control_input_ports(self, context): + """Get outcome_input_port specification dictionaries for items specified in monitor_for_control. 
+ + Note: leave Projections unspecified, as they need to be added to self.aux_components + for validation and activation by Composition + + Return port specification dictionaries (*without* Projections to them specified), their value sizes, + and monitored ports (to be used as Projection specifications by _instantiate_input_ports) + """ + + # FIX: 11/3/21 - MOVE _parse_monitor_specs TO HERE FROM ObjectiveMechanism + from psyneulink.core.components.mechanisms.processing.objectivemechanism import _parse_monitor_specs + + monitored_ports = _parse_monitor_specs(self.monitor_for_control) + port_value_sizes = self._handle_arg_input_ports(self.monitor_for_control)[0] + + outcome_input_ports_option = self.outcome_input_ports_option + + outcome_input_port_specs = [] + + # SEPARATE outcome_input_ports OPTION: + # Assign separate outcome_input_ports for each item in monitored_ports + if outcome_input_ports_option == SEPARATE: + + # Construct port specification to assign its name + for i, monitored_port in enumerate(monitored_ports): + name = monitored_port.name + if isinstance(monitored_port, OutputPort): + name = f"{monitored_port.owner.name}[{name.upper()}]" + name = 'MONITOR ' + name + outcome_input_port_specs.append({PORT_TYPE: InputPort, + NAME: name}) + # Return list of outcome_input_port specifications (and their sizes) for each monitored item + + # SINGLE outcome_input_port OPTIONS: + # Either combine or concatenate inputs from all items specified in monitor_for_control + # as input to a single outcome_input_port + else: + + if outcome_input_ports_option == CONCATENATE: + function = Concatenate + + elif outcome_input_ports_option == COMBINE: + function = LinearCombination + + else: + assert False, f"PROGRAM ERROR: Unrecognized option ({outcome_input_ports_option}) passed " \ + f"to ControlMechanism._parse_monitor_for_control_input_ports() for {self.name}" + + port_value_sizes = [function().function(port_value_sizes)] + + # Return single outcome_input_port specification + outcome_input_port_specs.append({PORT_TYPE: InputPort, + NAME: 'OUTCOME', + FUNCTION: function}) + + return outcome_input_port_specs, port_value_sizes, monitored_ports + + def _validate_monitor_for_control(self, nodes): + # Ensure all of the Components being monitored for control are in the Composition being controlled + from psyneulink.core.components.ports.port import Port + invalid_outcome_specs = [item for item in self.monitor_for_control + if ((isinstance(item, Mechanism) + and item not in nodes) + or ((isinstance(item, Port) + and item.owner not in nodes)))] + if invalid_outcome_specs: + names = [item.name if isinstance(item, Mechanism) else item.owner.name + for item in invalid_outcome_specs] + raise ControlMechanismError(f"{self.name} has 'outcome_ouput_ports' that receive " + f"Projections from the following Components that do not " + f"belong to the Composition it controls: {names}.", + names) def _instantiate_output_ports(self, context=None): @@ -1436,7 +1696,7 @@ def _register_control_signal_type(self, context=None): ) def _instantiate_control_signals(self, context): - """Subclassess can override for class-specific implementation (see OptimiziationControlMechanism for example)""" + """Subclassess can override for class-specific implementation (see OptimizationControlMechanism for example)""" output_port_specs = list(enumerate(self.output_ports)) for i, control_signal in output_port_specs: @@ -1529,16 +1789,22 @@ def _instantiate_control_signal_type(self, control_signal_spec, context): # 
tests/composition/test_control.py::TestModelBasedOptimizationControlMechanisms::test_stateful_mechanism_in_simulation allocation_parameter_default = np.ones(np.asarray(allocation_parameter_default).shape) except (KeyError, IndexError, TypeError): - allocation_parameter_default = self.parameters.control_allocation.default_value + # if control allocation is a single value specified from + # default_variable for example, it should be used here + # instead of the "global default" defaultControlAllocation + if len(self.defaults.control_allocation) == 1: + allocation_parameter_default = copy.deepcopy(self.defaults.control_allocation) + else: + allocation_parameter_default = copy.deepcopy(defaultControlAllocation) control_signal = _instantiate_port(port_type=ControlSignal, - owner=self, - variable=self.defaults.default_allocation # User specified value - or allocation_parameter_default, # Parameter default - reference_value=allocation_parameter_default, - modulation=self.defaults.modulation, - port_spec=control_signal_spec, - context=context) + owner=self, + variable=self.defaults.default_allocation # User specified value + or allocation_parameter_default, # Parameter default + reference_value=allocation_parameter_default, + modulation=self.defaults.modulation, + port_spec=control_signal_spec, + context=context) if not type(control_signal) in convert_to_list(self.outputPortTypes): raise ProjectionError(f'{type(control_signal)} inappropriate for {self.name}') return control_signal @@ -1551,8 +1817,13 @@ def _check_for_duplicates(self, control_signal, control_signals, context): and also in the ControlMechanism's **control** arg control_signals arg passed in to allow override by subclasses - """ + Warn if control_signal shares any ControlProjections with others in control_signals. + Warn if control_signal is a duplicate of any in control_signals. + + Return True if control_signal is a duplicate + """ + duplicates = [] for existing_ctl_sig in control_signals: # OK if control_signal is one already assigned to ControlMechanism (i.e., let it get processed below); # this can happen if it was in deferred_init status and initalized in call to _instantiate_port above. 
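# --------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of this change set) of the outcome_input_ports_option behavior
# implemented by _parse_monitor_for_control_input_ports above: SEPARATE creates one
# outcome_input_port per item in monitor_for_control, whereas CONCATENATE (or COMBINE) routes all
# monitored values to a single OUTCOME InputPort using Concatenate (or LinearCombination).
# Assumes the SEPARATE and CONCATENATE keywords are exposed on the top-level psyneulink namespace;
# the Mechanism names are hypothetical.
#
#   import psyneulink as pnl
#
#   a = pnl.ProcessingMechanism(name='A')
#   b = pnl.ProcessingMechanism(name='B')
#
#   # SEPARATE (the default): one outcome_input_port per monitored item
#   ctl_sep = pnl.ControlMechanism(monitor_for_control=[a, b],
#                                  outcome_input_ports_option=pnl.SEPARATE,
#                                  control=[(pnl.SLOPE, b)])
#
#   # CONCATENATE: a single OUTCOME outcome_input_port that concatenates the monitored values
#   # (COMBINE would instead sum them with LinearCombination)
#   ctl_cat = pnl.ControlMechanism(monitor_for_control=[a, b],
#                                  outcome_input_ports_option=pnl.CONCATENATE,
#                                  control=[(pnl.SLOPE, b)])
# --------------------------------------------------------------------------------------------------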
@@ -1565,15 +1836,16 @@ def _check_for_duplicates(self, control_signal, control_signals, context): # A Projection in control_signal is not in this existing one: it is different, # so break and move on to next existing_mod_sig break - return + warnings.warn(f"{control_signal.name} for {self.name} duplicates another one specified " + f"({existing_ctl_sig.name}); it will be ignored.") + return True # Warn if *any* projections from control_signal are identical to ones in an existing control_signal projection_type = existing_ctl_sig.projection_type - if any( - any(new_p.receiver == existing_p.receiver - for existing_p in existing_ctl_sig.efferents) for new_p in control_signal.efferents): + if any(any(new_p.receiver == existing_p.receiver + for existing_p in existing_ctl_sig.efferents) for new_p in control_signal.efferents): warnings.warn(f"Specification of {control_signal.name} for {self.name} " - f"has one or more {projection_type}s redundant with ones already on " + f"has one or more {projection_type.__name__}s redundant with ones already on " f"an existing {ControlSignal.__name__} ({existing_ctl_sig.name}).") def show(self): @@ -1639,6 +1911,7 @@ def add_to_monitor(self, monitor_specs, context=None): output_ports = self.objective_mechanism.add_to_monitor(monitor_specs=monitor_specs, context=context) def _add_process(self, process, role:str): + assert False super()._add_process(process, role) if self.objective_mechanism: self.objective_mechanism._add_process(process, role) @@ -1659,6 +1932,7 @@ def _remove_default_control_signal(self, type:tc.enum(CONTROL_SIGNAL, GATING_SIG and not ctl_sig_attribute[0].efferents): self.remove_ports(ctl_sig_attribute[0]) + # FIX: 11/15/21 SHOULDN'T THIS BE PUT ON COMPOSITION?? def _activate_projections_for_compositions(self, composition=None): """Activate eligible Projections to or from Nodes in Composition. If Projection is to or from a node NOT (yet) in the Composition, @@ -1666,31 +1940,36 @@ def _activate_projections_for_compositions(self, composition=None): """ dependent_projections = set() - if self.objective_mechanism and composition and self.objective_mechanism in composition.nodes: - # Safe to add this, as it is already in the ControlMechanism's aux_components - # and will therefore be added to the Composition along with the ControlMechanism - from psyneulink.core.compositions.composition import NodeRole - assert (self.objective_mechanism, NodeRole.CONTROL_OBJECTIVE) in self.aux_components, \ - f"PROGRAM ERROR: {OBJECTIVE_MECHANISM} for {self.name} not listed in its 'aux_components' attribute." - dependent_projections.add(self._objective_projection) - - for aff in self.objective_mechanism.afferents: - dependent_projections.add(aff) - - for ms in self.control_signals: - for eff in ms.efferents: - dependent_projections.add(eff) - - # ??ELIMINATE SYSTEM - # FIX: 9/15/19 - HOW IS THIS DIFFERENT THAN objective_mechanism's AFFERENTS ABOVE? 
- # assign any deferred init objective mech monitored OutputPort projections to this system - if self.objective_mechanism and composition and self.objective_mechanism in composition.nodes: - for output_port in self.objective_mechanism.monitored_output_ports: - for eff in output_port.efferents: + if composition: + # Ensure that objective_mechanism has been included in the ControlMechanism's aux_components + # and then add all Projections to and from the objective_mechanism to it + if self.objective_mechanism and self.objective_mechanism in composition.nodes: + # Safe to assert this, as it is already in the ControlMechanism's aux_components + # and will therefore be added to the Composition along with the ControlMechanism + from psyneulink.core.compositions.composition import NodeRole + assert (self.objective_mechanism, NodeRole.CONTROL_OBJECTIVE) in self.aux_components, \ + f"PROGRAM ERROR: {OBJECTIVE_MECHANISM} for {self.name} " \ + f"not listed in its 'aux_components' attribute." + dependent_projections.add(self._objective_projection) + # Add all Projections to and from objective_mechanism + for aff in self.objective_mechanism.afferents: + dependent_projections.add(aff) + # for output_port in self.objective_mechanism.monitored_output_ports: + # for eff in output_port.efferents: + # dependent_projections.add(eff) + for eff in self.objective_mechanism.efferents: dependent_projections.add(eff) - - # ??ELIMINATE SYSTEM - # FIX: 9/15/19 - HOW IS THIS DIFFERENT THAN control_signal's EFFERENTS ABOVE? + else: + # FIX: 11/3/21: NEED TO MODIFY ONCE OUTCOME InputPorts ARE MOVED + # Add Projections to controller's OUTCOME InputPorts + # Note: this applies if ControlMechanism has an objective_mechanism that is not in the Composition + for i in range(self.num_outcome_input_ports): + for proj in self.outcome_input_ports[i].path_afferents: + dependent_projections.add(proj) + + # Warn if any efferents have been added to the ContolMechanism that are not ControlSignals + if len(self.control_projections) != len(self.efferents): + warnings.warn(f"Projections from {self.name} have been added to {composition} that are not ControlSignals.") for eff in self.efferents: dependent_projections.add(eff) @@ -1734,6 +2013,13 @@ def monitored_output_ports_weights_and_exponents(self): except: return None + @property + def num_outcome_input_ports(self): + try: + return len(self.outcome_input_ports) + except: + return 0 + @property def control_signals(self): """Get ControlSignals from OutputPorts""" diff --git a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py index da75efb0382..dbf03790b30 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py @@ -185,11 +185,10 @@ from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.ports.modulatorysignals.gatingsignal import GatingSignal -from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.defaults import defaultGatingAllocation from psyneulink.core.globals.keywords import \ - GATING, GATING_PROJECTION, GATING_PROJECTIONS,GATING_SIGNAL,GATING_SIGNALS,GATING_SIGNAL_SPECS, \ - INIT_EXECUTE_METHOD_ONLY, MONITOR_FOR_CONTROL, MULTIPLICATIVE, PROJECTION_TYPE + GATING, GATING_PROJECTION, GATING_SIGNAL, GATING_SIGNALS, \ + 
INIT_EXECUTE_METHOD_ONLY, MONITOR_FOR_CONTROL, PROJECTION_TYPE from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -228,29 +227,6 @@ class GatingMechanismError(Exception): def __init__(self, error_value): self.error_value = error_value -def _gating_allocation_getter(owning_component=None, context=None): - return owning_component.control_allocation - -def _gating_allocation_setter(value, owning_component=None, context=None): - owning_component.parameters.control_allocation._set(np.array(value), context) - return value - -# def _control_allocation_getter(owning_component=None, context=None): -# from psyneulink.core.components.mechanisms.modulatory.controlmechanism import ControlMechanism -# from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal -# raise GatingMechanismError(f"'control_allocation' attribute is not implemented on {owning_component.name}; " -# f"consider using a {ControlMechanism.__name__} instead, " -# f"or a {ControlMechanism.__name__} if both {ControlSignal.__name__}s and " -# f"{GatingSignal.__name__}s are needed.") -# -# def _control_allocation_setter(value, owning_component=None, context=None, **kwargs): -# from psyneulink.core.components.mechanisms.modulatory.controlmechanism import ControlMechanism -# from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal -# raise GatingMechanismError(f"'control_allocation' attribute is not implemented on {owning_component.name}; " -# f"consider using a {ControlMechanism.__name__} instead, " -# f"or a {ControlMechanism.__name__} if both {ControlSignal.__name__}s and " -# f"{GatingSignal.__name__}s are needed.") - class GatingMechanism(ControlMechanism): """ @@ -431,12 +407,9 @@ class Parameters(ControlMechanism.Parameters): :read only: True """ # This must be a list, as there may be more than one (e.g., one per control_signal) - value = Parameter(np.array([defaultGatingAllocation]), aliases='control_allocation', pnl_internal=True) - gating_allocation = Parameter( + value = Parameter( np.array([defaultGatingAllocation]), - getter=_gating_allocation_getter, - setter=_gating_allocation_setter, - read_only=True, + aliases=['control_allocation', 'gating_allocation'], pnl_internal=True ) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 647507de636..6c15e5b26bf 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -8,157 +8,192 @@ # ************************************** OptimizationControlMechanism ************************************************* +# FIX: REWORK WITH REFERENCES TO `outcome ` +# INTRODUCE SIMULATION INTO DISCUSSION OF COMPOSITION-BASED + """ +Contents +-------- + + * `OptimizationControlMechanism_Overview` + - `Expected Value of Control ` + - `Agent Representation and Types of Optimization ` + - `Model-Free" Optimization ` + - `Model-Based" Optimization ` + * `OptimizationControlMechanism_Creation` + - `Agent Rep ` + - `State Features ` + - `State Feature Functions ` + - `Outcome ` + * `OptimizationControlMechanism_Structure` + - `Agent Representation ` + - `State ` + - `Input ` + - `state_input_ports ` + 
- `outcome_input_ports ` + - `objective_mechanism ` + - `monitor_for_control ` + - `Function ` + - `OptimizationControlMechanism_Custom_Function` + - `OptimizationControlMechanism_Search_Functions` + - `OptimizationControlMechanism_Default_Function` + - `Output ` + - `Randomization ControlSignal ` + * `OptimizationControlMechanism_Execution` + - `OptimizationControlMechanism_Optimization_Procedure` + - `OptimizationControlMechanism_Estimation_Randomization` + * `OptimizationControlMechanism_Class_Reference` + + +.. _OptimizationControlMechanism_Overview: + Overview -------- -An OptimizationControlMechanism is a `ControlMechanism ` that uses an `OptimizationFunction` to find -an optimal `control_allocation ` for a given `state -`. The `OptimizationFunction` uses the OptimizationControlMechanism's -`evaluation_function` ` to evaluate `control_allocation -` samples, and then implements the one that yields the best predicted result. -The result returned by the `evaluation_function` ` is ordinarily -the `net_outcome ` computed by the OptimizationControlMechanism for the `Composition` -(or part of one) that it controls, and its `ObjectiveFunction` seeks to maximize this, which corresponds to -maximizing the Expected Value of Control, as described below. +An OptimizationControlMechanism is a `ControlMechanism ` that uses an `OptimizationFunction` to +optimize the performance of the `Composition` for which it is a `controller `. It does so +by using the `OptimizationFunction` (assigned as its `function `) to execute +its `agent_rep ` -- a representation of the Composition to be optimized -- +under different `control_allocations `, and selecting the one that optimizes +its `net_outcome `. A OptimizationControlMechanism can be configured to implement +forms of optimization, ranging from fully `model-based optimization ` +that uses the Composition itself as the `agent_rep ` to simulate the +outcome for a given `state ` (i.e., a combination of the current input and a +particular `control_allocation `), to fully `model-free optimization +` by using a `CompositionFunctionApproximator` as the `agent_rep +` that learns to predict the outcomes for a state. Intermediate forms of +optimization can also be implemented, that use simpler Compositions to approximate the dynamics of the full Composition. +The outcome of executing the `agent_rep ` is used to compute a `net_outcome +` for a given `state `, that takes into account +the `costs ` associated with the `control_allocation, and is used to determine +the optimal `control_allocations `. .. _OptimizationControlMechanism_EVC: **Expected Value of Control** -The `net_outcome ` of an OptimizationControlMechanism, like any `ControlMechanism` -is computed as the difference between the `outcome ` computed by its `objective_mechanism -` and the `costs ` of its `control_signals -` for a given `state ` (i.e., -set of `state_feature_values ` and `control_allocation -`. If the `outcome ` is configured to measure the -value of processing (e.g., reward received, time taken to respond, or a combination of these, etc.), -and the `OptimizationFunction` assigned as the OptimizationControlMechanism's `function -` is configured find the `control_allocation -` that maximizes its `net_outcome `, -then the OptimizationControlMechanism is said to be maximizing the `Expected Value of Control (EVC) -`_. 
That is, it implements a cost-benefit analysis that -weighs the `costs ` of the ControlSignal `values ` specified by a -`control_allocation ` against the `outcome ` expected -to result from it. The costs are computed based on the `cost_options ` specified for -each of the OptimizationControlMechanism's `control_signals ` and its -`combine_costs ` function. The EVC is determined by its `compute_net_outcome -` function (assigned to its `net_outcome ` -attribute), which is computed for a given `state ` by the -OptimizationControlMechanism's `evaluation_function `. +The `net_outcome ` of an OptimizationControlMechanism's `agent_rep +` is computed -- for a given `state ` +(i.e., set of `state_feature_values ` and a `control_allocation +` -- as the difference between the `outcome ` computed +by its `objective_mechanism ` and the aggregated `costs ` +of its `control_signals ` computed by its `combine_costs +` function. If the `outcome ` computed by the +`objective_mechanism ` is configured to measure the value of processing (e.g., +reward received, time taken to respond, or a combination of these, etc.), and the `OptimizationFunction` assigned as +the OptimizationControlMechanism's `function ` is configured find the +`control_allocation ` that maximizes its `net_outcome +` (that is, the `outcome ` discounted by the +result of the `combine_costs ` function, then the OptimizationControlMechanism is +said to be maximizing the `Expected Value of Control (EVC) `_. That +is, it implements a cost-benefit analysis that weighs the `costs ` of the ControlSignal +`values ` associated with a `control_allocation ` against +the `outcome ` expected to result from it. The costs are computed based on the +`cost_options ` specified for each of the OptimizationControlMechanism's `control_signals +` and its `combine_costs ` function. +The EVC is determined by its `compute_net_outcome ` function (assigned to its +`net_outcome ` attribute), which is computed for a given `state +` by the OptimizationControlMechanism's `evaluate_agent_rep +` method. In these respects, optimization of a Composition's +performance by its OptimizationControlMechanism -- as indexed by its `net_outcome ` +attribute -- implement a form of `Bounded Rationality `_, +also referred to as `Resource Rationality `_, +in which the constraints imposed by the "bounds" or resources are reflected in the `costs` of the ControlSignals +(also see `Computational Rationality `_ and `Toward a +Rational and Mechanistic Account of Mental Effort +`_). COMMENT: The table `below ` lists different parameterizations of OptimizationControlMechanism that implement various models of EVC Optimization. - -### FIX: THROUGHOUT DOCUMENT, REWORD AS "optimizing control_allocation" RATHER THAN "maximizing" / "greatest" -### FIX: RESTORE agent_rep from agent_rep - COMMENT .. _OptimizationControlMechanism_Agent_Representation_Types: **Agent Representation and Types of Optimization** -The defining characteristic of an OptimizationControlMechanism is its `agent representation -`, that is used to determine the `net_outcome -for a given `state ` and find the `control_allocation -` that optimizes this. The `agent_rep ` -can be either the `Composition` to which the OptimizationControlMechanism belongs (and controls) or another one that -is used to estimate the `net_outcome ` for that Composition. 
This distinction -corresponds closely to the distinction between *model-based* and *model-free* optimization in the `machine learning -`_ and `cognitive neuroscience `_ literatures, as described below. +Much of the functionality described above is supported by a `ControlMechanism` (the parent class of an +OptimizationControlMechanism). The defining characteristic of an OptimizationControlMechanism is its `agent +representation `, that is used to determine the `net_outcome +` for a given `state `, and find the +`control_allocation ` that optimizes this. The `agent_rep +` can be the `Composition` to which the OptimizationControlMechanism +belongs (and controls), another (presumably simpler) one, or a `CompositionFunctionApproximator`) that is used to +estimate the `net_outcome ` Composition of which the OptimizationControlMechanism is +the `controller `. These different types of `agent representation +` correspond closely to the distinction between *model-based* and +*model-free* optimization in the `machine learning +`_ +and `cognitive neuroscience `_ literatures, as described below. + +.. figure:: _static/Optimization_fig.svg + :scale: 50% + :alt: OptimizationControlMechanism + + **Functional Anatomy of an OptimizationControlMechanism.** *Panel A:* Examples of use in fully model-based + and model-free optimization. Note that in the example of `model-based optimization + ` (left), the OptimizationControlMechanism uses the entire + `Composition` that it controls as its `agent_rep `, whereas in + the example of `model-free optimization ` (right) the + the `agent_rep ` is a `CompositionFunctionApproximator`. The `agent_rep + ` can also be another (presumably simpler) Composition that can be used + to implement forms of optimization intermediate between fully model-based and model-free. *Panel B:* Flow of + execution during optimization. In both panels, faded items show process of adaptation when using a + `CompositionFunctionApproximator` as the `agent_rep `. +| +.. _OptimizationControlMechanism_Model_Based: -COMMENT: -FIX: THIS NEEDS TO BE RE-WRITTEN TO INDICATE THAT MODEL-BASED RELIES BOTH ON THE NATURE OF THE AGENT_REP AND THE STATE - FULL MODEL-BASED USES THE COMPOSITION ITSELF AS THE OCM (BEST ESTIMATE IT HAS FOR ITS OWN POLICY) AND ACCESS TO - STATE REPRESENTATIONS THAT FULLY DESCRIBE ALL EXPECTED STATES (I.E., DUPLICATE THE GENERATIVE PROCESS FOR) THE - ENVIRONMENT. SO, FULLY MODEL-BASED PROCESSING USES THE COMPOSITION ITSELF AS THE agent_rep AND A FULLY GENERATIVE - MODEL FOR THE ENVIRONMENT AS THE state_feature_function - - PROVIDES 1/2 OF THIS; - AT THE FAR OTHER EXTREME, MODEL-FREE CORRESPONDS TO USING THE CURRENT (OR PREDICTED) INPUTS FOR THE STATE - AND A "FLAT" REGRESSION MODEL (ONE STEP PREDICTION) FOR THE AGENT_REP -COMMENT +*Model-Based Optimization* + +The fullest form of this is implemented by assigning as the `agent_rep ` +the Composition for which the OptimizationControlMechanism is the `controller `). +On each `TRIAL `, that Composition *itself* is provided with either the most recent inputs +to the Composition, or ones predicted for the upcoming trial (as determined by the `state_feature_values +` of the OptimizationControlMechanism), and then used to simulate +processing on that trial in order to find the `control_allocation ` that yields +the best `net_outcome ` for that trial. 
A different Composition can also be assigned as +the `agent_rep `, that approximates in simpler form the dynamics of processing +in the Composition for which the OptimizationControlMechanism is the `controller `, +implementing a more restricted form of model-based optimization. .. _OptimizationControlMechanism_Model_Free: *"Model-Free" Optimization* -.. note:: - The term "model-free" is used here, but placed in "apology quotes," to reflect the fact that, while this term is - used widely (e.g., in machine learning and cognitive science) to distinguish it from "agent_rep-based" forms of - processing, "model-free" processing nevertheless relies on *some* form of agent_rep -- albeit a much simpler one -- - for learning, planning and decision making. Here, the distinction is between the use of the most complete form - of agent_rep (referred to as "agent_rep-based"), which the agent (i.e., `Composition` *itself*) serves as the "agent_rep," - and simpler forms of models that (can learn to) approximate the agent's behavior (e.g., reinforcement learning - algorithms or other forms of function approximation) that can be assigned as the OptimizationControlMechanism's - `agent_rep `. - -This is implemented by assigning as the `agent_rep ` a Composition other than the -one to which the OptimizationControlMechanism belongs (and for which it is the `controller `). -In each `TRIAL `, the `agent_rep ` is given the chance to -adapt, by adjusting its parameters in order to improve its prediction of the `net_outcome -` for the Composition (or part of one) that is controlled by the -OptimizationControlMechanism (based on the `state ` and `net_outcome -` of the prior trial). The `agent_rep ` is -then used to predict the `net_outcome ` for `control_allocation -` samples to find the one that yields the best predicted `net_outcome + .. note:: + The term *model-free* is placed in apology quotes to reflect the fact that, while this term is + used widely (e.g., in machine learning and cognitive science) to distinguish it from *model-based* forms of + processing, model-free processing nevertheless relies on *some* form of model -- albeit usually a much simpler + one -- for learning, planning and decision making. In the context of a OptimizationControlMechanism, this is + addressed by use of the term "agent_rep", and how it is implemented, as described below. + +This clearest form of this uses a `CompositionFunctionApproximator`, that learns to predict the `net_outcome +`net_outcome ` for a given state (e.g., using reinforcement learning or other forms +of function approximation, , such as a `RegressionCFA`). In each `TRIAL ` the `agent_rep +` is used to search over `control_allocation +`\\s, to find the one that yields the best predicted `net_outcome ` of processing on the upcoming trial, based on the current or (expected) -`state_feature_values ` for that trial. - -.. _OptimizationControlMechanism_Model_Based: - -*Model-Based Optimization* - -This is achieved by assigning as the `agent_rep ` the Composition to which the -OptimizationControlMechanism belongs (and for which it is the `controller `). On each -`TRIAL `, that Composition itself is used to simulate processing on the upcoming trial, based on -the current or (expected) `state_feature_values ` for that trial, -in order to find the `control_allocation ` that yields the best `net_outcome -` for that trial. +`state_feature_values ` for that trial. 
The `agent_rep +` is also given the chance to adapt in order to improve its prediction +of its `net_outcome ` based on the `state `, +and `net_outcome ` of the prior trial. A Composition can also be used to generate +such predictions permitting, as noted above, forms of optimization intermediate between the extreme examples of +model-based and model-free. .. _OptimizationControlMechanism_Creation: Creating an OptimizationControlMechanism ---------------------------------------- -An OptimizationControlMechanism is created in the same was as any `ControlMechanism `. -The following arguments of its constructor are specific to the OptimizationControlMechanism: - -* **state_features** -- takes the place of the standard **input_ports** argument in the constructor for a Mechanism`, - and specifies the values used by the OptimizationControlMechanism, together with a `control_allocation - `, to calculate a `net_outcome `. For - `model-based optimzation ` these are also used as the inputs to the - Composition (i.e., `agent_rep `) when it's `evaluate ` - method is called (see `OptimizationControlMechanism_State_Features` below). Features can be specified using - any of the following, singly or combined in a list: - - * *InputPort specification* -- this can be any form of `InputPort specification ` - that resolves to an OutputPort from which the InputPort receives a Projection; the `value - ` of that OutputPort is used as one of the `state_feature_values - ` for the `state_features - ` of the OptimizationControlMechanism. Each of these InputPorts is - marked as `internal_only ` = `True`. - - Features can also be added to an existing OptimizationControlMechanism using its `add_state_features` method. If the - **state_features** argument is not specified, then the `input ` to the `Composition` on - the last trial of its execution is used to predict the `net_outcome ` for the upcoming - trial. - -.. _OptimizationControlMechanism_Feature_Function: - -* **state_feature_function** -- specifies `function ` of the InputPort created for each item listed in - **state_features**. By default, this is the identity function, that assigns the current value of the feature to the - OptimizationControlMechanism's `state_feature_values `attribute. - However, other functions can be assigned, for example to maintain a record of past values, integrate them over - trials, and/or provide a generative model of the environment (for use in `model-based processing - `. -.. -* **agent_rep** -- specifies the `Composition` used by the OptimizationControlMechanism's `evaluation_function - ` to calculate the predicted `net_outcome +The constructor has the same arguments as a `ControlMechanism `, with the following +exceptions/additions, which are specific to the OptimizationControlMechanism: + +.. _OptimizationControlMechanism_Agent_Rep_Arg: + +* **agent_rep** -- specifies the `Composition` used by the OptimizationControlMechanism's `evaluate_agent_rep + ` method to calculate the predicted `net_outcome ` for a given `state ` (see `below ` for additional details). If it is not specified, then the `Composition` to which the OptimizationControlMechanism is assigned becomes its `agent_rep @@ -167,122 +202,380 @@ optimization. If that Composition already has a `controller ` specified, the OptimizationControlMechanism is disabled. If another Composition is specified, it must conform to the specifications for an `agent_rep ` as described `below - `. + `. 
The `agent_rep ` can also be + a `CompositionFunctionApproximator` for `model-free ` forms of + optimization. The type of Component assigned as the `agent_rep ` is + identified in the OptimizationControlMechanism's `agent_rep_type ` + attribute. + +.. _OptimizationControlMechanism_State_Features_Arg: + +* **state_features** -- specifies the values provided by the OptimizationControlMechanism as the input to the + `agent_rep ` when used, together with a selected `control_allocation + `, to estimate or predict the Composition's `net_outcome + `. These are used to construct the `state_input_ports + ` for the OptimizationControlMechanism, that provide the + `agent_rep` with its input, and thus the specification requirements for + **state_features** depend on whether the `agent_rep` is a `Composition` + or a `CompositionFunctionApproximator`: + + .. _OptimizationControlMechanism_Agent_Rep_Composition: + + * *agent_rep is a Composition* -- the **state_features** specify the inputs to the Composition when it is executed + by the OptimizationControlMechanism to `evaluate ` its performance. + If **state_features** is not specified, this is done automatically by constructing a set of `state_input_ports + ` that `shadow the input ` to every + `InputPort` of every `INPUT ` `Node ` of the Composition assigned as + the `agent_rep `. In this case, if `controller_mode + ` of the Composition for which the OptimizationControlMechanism is the `controller + ` is set to *AFTER* (the default), the `input ` to + the Composition on the current trial is used as its input to the `agent_rep + ` for the optimization process; if the `controller_mode + ` is *BEFORE*, then the inputs from the previous trial are used. + + The **state_features** argument can also be specified explicitly, using the formats described below. This is + useful if different functions need to be assigned to different `state_input_ports + ` used to generate the corresponding `state_feature_values + state_feature_values ` (see `below + `). However, doing so overrides the automatic + assignment of all state_features, and so a complete and appropriate set of specifications must be provided + (see note below). + + .. _OptimizationControlMechanism_State_Features_Shapes: + + .. note:: + If **state_features** *are* specified explicitly when the `agent_rep ` + is a Composition, there must be one for every `InputPort` of every `INPUT ` `Node + ` in that Composition, and these must match -- both individually, and in their order -- + the `inputs to the Composition `) required by its `run ` + method. Failure to do so generates an error indicating this. + + .. _OptimizationControlMechanism_Selective_Input: + + .. hint:: + For cases in which only a subset of the inputs to the Composition are relevant to its optimization (e.g., + the others should be held constant), it is still the case that all must be specified as **state_features** + (see note above). This can be handled several ways. One is by specifying (as required) **state_features** + for all of the inputs, and assigning *state_feature_functions** (see `below + `) such that those assigned to the desired + inputs pass their values unmodified, while those for the inputs that are to be ignored return a constant value. + Another approach, for cases in which the desired inputs pertain to a subset of Components in the Composition + that solely responsible for determining its `net_outcome `, is to assign those + Components to a `nested Composition ` and assign that Composition as the `agent_rep + `. 
A third, more sophisticated approach, would be to assign + ControlSignals to the InputPorts for the irrelevant features, and specify them to suppress their values. + + .. _OptimizationControlMechanism_Agent_Rep_CFA: + + * *agent_rep is a CompositionFunctionApproximator* -- the **state_features** specify the inputs to the + CompositionFunctionApproximator's `evaluate ` method. This is not + done automatically (see warning below). + + .. warning:: + The **state_features** specified when the `agent_rep ` is a + `CompositionFunctionApproximator` must align with the arguments of its `evaluate + ` method. Since the latter cannot always be determined automatically, + the `state_input_ports ` cannot be created automatically, nor + can the **state_features** specification be validated; thus, specifying inappropriate **state_features** may + produce errors that are unexpected or difficult to interpret. + + COMMENT: + FIX: CONFIRM (OR IMPLEMENT?) THE FOLLOWING + If all of the inputs to the Composition are still required, these can be specified using the keyword *INPUTS*, + in which case they are retained along with any others specified. + COMMENT + + .. _OptimizationControlMechanism_State_Features_Shadow_Inputs: + + The specifications in the **state_features** argument are used to construct the `state_input_ports + `, and can be any of the following, used either singly or in a list: + + * *InputPort specification* -- this creates an `InputPort` as one of the OptimizationControlMechanism's + `state_input_ports ` that `shadows ` the + input to the specified InputPort; that is, the value of which is used as the corresponding value of the + OptimizationControlMechanism's `state_feature_values `. + + .. technical_note:: + The InputPorts specified as state_features are marked as `internal_only ` = `True`. + + * *OutputPort specification* -- this can be any form of `OutputPort specification ` + for any `OutputPort` of another `Mechanism ` in the Composition; the `value ` + of the specified OutputPort is used as the corresponding value of the OptimizationControlMechanism's + `state_feature_values `. + + * *Mechanism* -- if the `agent_rep ` is a Composition, it must be an + `INPUT ` `Node ` of that Composition, and the Mechanism's `primary InputPort + ` is used (since in this case the state_feature must correspond to an input to the Composition). + If the `agent_rep ` is a `CompositionFunctionApproximator`, then the + Mechanism's `primary OutputPort ` is used (since is the typically usage for specifying an + InputPort); if the input to the Mechanism is to be shadowed, then its InputPort must be specified explicitly. + + COMMENT: + FIX: CONFIRM THAT THE FOLLOWING ALL WORK + COMMENT + State features can also be added to an existing OptimizationControlMechanism using its `add_state_features` method. + +.. _OptimizationControlMechanism_State_Feature_Functions_Arg: + +* **state_feature_functions** -- specifies the `function(s) ` assigned to the `state_input_ports + ` created for each of the corresponding **state_features**. + If **state_feature_functions** is not specified, the identity function is assigned to all of the `state_input_ports + ` (whether those were created automatically or explicitly specified; + see `above `). However, other functions can be specified + individually for the `state_input_ports ` associated with each + state_feature. 
This can be useful, for example to provide an average or integrated value of prior inputs, to + select specific inputs for use (see `hint ` above), and/or use a + generative model of the environment to provide inputs to the `agent_rep ` + during the optimization process. This can be done by specifying the **state_feature_functions** argument with a + dict with keys that match each of the specifications in the **state_features** argument, and corresponding values + that specify the function to use for each. + + .. note:: + A dict can be used to specify **state_feature_functions** only if **state_features** are specified explicitly + (see `above `). The dict must contain one entry for + each of the items specified in **state_features**, and the value returned by each function must preserve the + shape of its input, which must match that of the corresponding input to the Composition's `run + ` method (see `note ` above). + +.. _OptimizationControlMechanism_Outcome_Args: + +* **Outcome arguments** -- these specify the Components, the values of which are assigned to the `outcome + ` attribute, and used to compute the `net_outcome ` for a + given `control_allocation ` (see `OptimizationControlMechanism_Execution`). + As with a ControlMechanism, these can be sepcified directly in the **monitor_for_control** argument, or through the + use of `ObjectiveMechanism` specified in the **objecctive_mechanism** argument (see + ControlMechanism_Monitor_for_Control for additional details). However, an OptimizationControlMechanism places some + restrictions on the specification of these arguments that, as with specification of `state_features + `, depend on the nature of the `agent_rep + `, as described below. + + * *agent_rep is a Composition* -- the items specified to be monitored for control must belong to the `agent_rep + `, since those are the only ones that will be executed when the + `evaluate_agent_rep ` is called; an error will be generated + identifying any Components that do not belong to the `agent_rep `. + + * *agent_rep is a CompositionFunctionApproximator* -- the items specified to be monitored for control can be any + within the Composition for which the OptimizationControlMechanism is the `controller `; + this is because their values during the last execution of the Composition are used to determine the `net_outcome + ` that the `agent_rep `\\'s + `adapt ` method -- if it has one -- seeks to predict. Accordingly, + the values of the items specified to be monitored control must match, in shape and order, the + **net_outcome** of that `adapt ` method. + +* **Optimization arguments** -- these specify parameters that determine how the OptimizationControlMechanism's + `function ` searches for and determines the optimal `control_allocation + ` (see `OptimizationControlMechanism_Execution`); this includes specification + of the `num_estimates ` and `num_trials_per_estimate + ` parameters, as well as the `random_variables + `, `initial_seed ` and + `same_seed_for_all_allocations ` Parameters, which + determine how the `net_outcome ` is estimated for a given `control_allocation + ` (see `OptimizationControlMechanism_Estimation_Randomization` for additional + details). .. 
_OptimizationControlMechanism_Structure: Structure --------- -In addition to the standard Components associated with a `ControlMechanism`, including a `Projection ` -to its *OUTCOME* InputPort from its `objective_mechanism `, and a -`function ` used to carry out the optimization process, it has several -other constiuents, as described below. +An OptimizationControlMechanism conforms to the structure of a `ControlMechanism`, with the following exceptions +and additions. -.. _OptimizationControlMechanism_ObjectiveMechanism: +.. _OptimizationControlMechanism_Agent_Rep: -*ObjectiveMechanism* -^^^^^^^^^^^^^^^^^^^^ - -Like any `ControlMechanism`, an OptimizationControlMechanism may be assigned an `objective_mechanism -` that is used to evaluate the outcome of processing for a given trial (see -`ControlMechanism_Objective_ObjectiveMechanism). This passes the result to the OptimizationControlMechanism, which it -places in its `outcome ` attribute. This is used by its `compute_net_outcome -` function, together with the `costs ` of its -`control_signals `, to compute the `net_outcome ` of -processing for a given `state `, and that is returned by `evaluation` method of the -OptimizationControlMechanism's `agent_rep `. - -.. note:: - The `objective_mechanism ` is distinct from, and should not be - confused with the `objective_function ` parameter of the - OptimizationControlMechanism's `function `. The `objective_mechanism - ` evaluates the `outcome ` of processing - without taking into account the `costs ` of the OptimizationControlMechanism's - `control_signals `. In contrast, its `evaluation_function - `, which is assigned as the - `objective_function` parameter of its `function `, takes the `costs - ` of the OptimizationControlMechanism's `control_signals ` - into account when calculating the `net_outcome` that it returns as its result. +*Agent Representation* +^^^^^^^^^^^^^^^^^^^^^^ + +The defining feature of an OptimizationControlMechanism is its agent representation, specified in the **agent_rep** +argument of its constructor, and assigned to its `agent_rep ` attribute. This +designates a representation of the `Composition` (or parts of it) that the OptimizationControlMechanism uses to +evaluate sample `control_allocations ` in order to find one that optimizes the +the `net_outcome ` of the Composition when it is fully executed. The `agent_rep +` can be the Composition itself for which the OptimizationControlMechanism is +the `controller ` (fully `model-based optimization `, +or another one `model-free optimization `), that is usually a simpler +Composition or a `CompositionFunctionApproximator` used to estimate the `net_outcome ` +for the full Composition (see `above `). The `evaluate +` method of the `agent_rep ` is assigned as the +`evaluate_agent_rep ` method of the OptimizationControlMechanism. +If the `agent_rep ` is not the Composition for which the +OptimizationControlMechanism is the controller, then it must meet the following requirements: + +* Its `evaluate ` method must accept as its first four positional arguments: + + - values that correspond in shape to the `state_feature_values + ` (inputs for estimate); + - `control_allocation ` (the set of parameters for which estimates + of `net_outcome ` are made); + - `num_trials_per_estimate ` (number of trials executed by + agent_rep for each estimate). +.. 
+* If it has an `adapt ` method, that must accept as its first three + arguments, in order: + + - values that correspond to the shape of the `state_feature_values + ` (inputs that led to the net_come); + - `control_allocation ` (set of parameters that led to the net_outcome); + - `net_outcome ` (the net_outcome that resulted from the `state_feature_values + ` and `control_allocation + `) that must match the shape of `outcome `. + COMMENT: + - `num_estimates ` (number of estimates of `net_outcome + ` made for each `control_allocation `). + COMMENT + + .. _OptimizationControlMechanism_State: + +*State* +~~~~~~~ + +The current state of the OptimizationControlMechanism -- or, more properly, its `agent_rep +` -- is determined by the OptimizationControlMechanism's current +`state_feature_values ` (see `below +`) and `control_allocation `. +These are provided as input to the `evaluate_agent_rep ` method, +the results of which are used together with the `costs ` associated with the +`control_allocation `, to evaluate the `net_outcome +` for that state. + +.. _OptimizationControlMechanism_Input: + +*Input* +^^^^^^^ + +An OptimizationControlMechanism has two types of `input_ports `, corresponding to the two +forms of input it requires: `state_input_ports ` that provide the values +of the Components specified as its `state_features `, and that are used +as inputs to the `agent_rep ` when its `evaluate ` method +is used to execute it; and `outcome_input_ports ` that provide the +outcome of executing the `agent_rep `, that is used to compute the `net_outcome +` for the `control_allocation ` under which the +execution occurred. Each of these is described below. .. _OptimizationControlMechanism_State_Features: -*State Features* -^^^^^^^^^^^^^^^^ - -In addition to its `primary InputPort ` (which typically receives a projection from the -*OUTCOME* OutputPort of the `objective_mechanism `, -an OptimizationControlMechanism also has an `InputPort` for each of its state_features. By default, these are the current -`input ` for the Composition to which the OptimizationControlMechanism belongs. However, -different values can be specified, as can a `state_feature_function ` -that transforms these. For OptimizationControlMechanisms that implement `model-free -` optimization, its `state_feature_values -` are used by its `evaluation_function -` to predict the `net_outcome ` for a -given `control_allocation `. For OptimizationControlMechanisms that implement -fully `agent_rep-based ` optimization, the `state_feature_values -` are used as the Composition's `input ` -when it is executed to evaluate the `net_outcome ` for a given -`control_allocation`. +*state_input_ports* +~~~~~~~~~~~~~~~~~~~ + +The `state_input_ports ` receive `Projections ` +from the Components specified as the OptimizationControlMechanism's `state_features +`, the values of which are assigned as the `state_feature_values +`, and conveyed to the `agent_rep +` when it is `executed `. If the +`agent_rep is a `Composition `, then the +OptimizationControlMechanism has a state_input_port for every `InputPort` of every `INPUT ` `Node +` of the `agent_rep ` Composition, each of which receives +a `Projection` that `shadows the input ` of the corresponding state_feature. If the +`agent_rep is a CompositionFunctionApproximator `, +then the OptimizationControlMechanism has a state_input_port that receives a Projection from each Component specified +in the **state_features** arg of its constructor. 
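As a concrete illustration (a minimal sketch, not drawn from this change set; all Mechanism names are
hypothetical), explicitly specifying **state_features** when the `agent_rep` is a Composition creates one
state_input_port per specification, each shadowing the corresponding input, and a **state_feature_functions**
dict (with one entry per state_feature, as described above) can assign a different function to each::

    import psyneulink as pnl

    ia = pnl.ProcessingMechanism(name='INPUT A')
    ib = pnl.ProcessingMechanism(name='INPUT B')
    output = pnl.ProcessingMechanism(name='OUTPUT')
    comp = pnl.Composition(pathways=[[ia, output], [ib, output]], name='comp')

    ocm = pnl.OptimizationControlMechanism(
        agent_rep=comp,
        # one specification for every InputPort of every INPUT Node of the agent_rep
        state_features=[ia.input_port, ib.input_port],
        # integrate the value shadowed for INPUT A; pass INPUT B through unmodified
        state_feature_functions={ia.input_port: pnl.AdaptiveIntegrator(rate=0.5),
                                 ib.input_port: pnl.Linear()},
        monitor_for_control=[output],
        function=pnl.GridSearch(),
        control_signals=[pnl.ControlSignal(modulates=(pnl.SLOPE, output),
                                           allocation_samples=[0.1, 0.5, 1.0])])

    # ocm.state_input_ports -> two InputPorts, one shadowing the input to each INPUT Node
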
+ +COMMENT: +In either, case the the `values ` of the +`state_input_ports ` are assigned to the `state_feature_values +` attribute that is used, in turn, by the +OptimizationControlMechanism's `evaluate_agent_rep ` method to +estimate or predict the `net_outcome ` for a given `control_allocation +` (see `OptimizationControlMechanism_Execution`). State features can be of two types: -* *Input Features* -- these are values received as input by other Mechanisms in the `Composition`. They are - specified as `shadowed inputs ` in the **state_features** argument of the - OptimizationControlMechanism's constructor (see `OptimizationControlMechanism_Creation`). An InputPort is - created on the OptimizationControlMechanism for each feature, that receives a `Projection` paralleling - the input to be shadowed. +* *Input Features* -- these are values that shadow the input received by a `Mechanisms ` in the + `Composition` for which the OptimizationControlMechanism is a `controller ` (irrespective + of whether that is the OptimizationControlMechanism`s `agent_rep `). + They are implemented as `shadow InputPorts ` (see + `OptimizationControlMechanism_State_Features_Shadow_Inputs` for specification) that receive a + `Projection` from the same source as the Mechanism being shadowed. .. -* *Output Features* -- these are the `value ` of an `OutputPort` of some other `Mechanism ` - in the Composition. These too are specified in the **state_features** argument of the OptimizationControlMechanism's - constructor (see `OptimizationControlMechanism_Creation`), and each is assigned a `Projection` from the specified - OutputPort(s) to the InputPort of the OptimizationControlMechanism for that feature. +* *Output Features* -- these are the `value ` of an `OutputPort` of `Mechanism ` in the + `Composition` for which the OptimizationControlMechanism is a `controller ` (again, + irrespective of whether it is the OptimizationControlMechanism`s `agent_rep + `); and each is assigned a + `Projection` from the specified OutputPort(s) to the InputPort of the OptimizationControlMechanism for that feature. + +The InputPorts assigned to the **state_features** are listed in the OptimizationControlMechanism's `state_input_port +` attribute, and their current `values ` are +listed in its `state_feature_values ` attribute. + +The InputPorts assigned to the **state_features** are listed in the OptimizationControlMechanism's `state_input_port +` attribute, and their current `values ` are +listed in its `state_feature_values ` attribute. +COMMENT -The current `value ` of the InputPorts for the state_features are listed in the `state_feature_values -` attribute. +.. _OptimizationControlMechanism_Outcome: -.. _OptimizationControlMechanism_State: +*outcome_input_ports* +~~~~~~~~~~~~~~~~~~~~~ -*State* -^^^^^^^ +The `outcome_input_ports ` comprise either a single `OutputPort` +that receives a `Projection` from the OptimizationControlMechanism's `objective_mechanism +` if has one; or, if it does not, then an OutputPort for each +Component it `monitors ` to determine the `net_outcome +` of executing its `agent_rep ` (see `outcome +arguments ` for how these are specified). The value(s) of the +`outcome_input_ports ` are assigned to the +OptimizationControlMechanism's `outcome ` attribute. -The state of the Composition (or part of one) controlled by an OptimizationControlMechanism is defined by a combination -of `state_feature_values ` for its state_features ` (see `above `) and a `control_allocation -`. +.. 
_OptimizationControlMechanism_ObjectiveMechanism: -.. _OptimizationControlMechanism_Agent_Rep: +*objective_mechanism* + +If an OptimizationControlMechanism has an `objective_mechanism `, it is +assigned a single outcome_input_port, named *OUTCOME*, that receives a Projection from the objective_mechanism's +`OUTCOME OutputPort `. The OptimizationControlMechanism's `objective_mechanism +` is used to evaluate the outcome of executing its `agent_rep +` for a given `state `. This passes +the result to the OptimizationControlMechanism's *OUTCOME* InputPort, that is placed in its `outcome +` attribute. + + .. note:: + An OptimizationControlMechanism's `objective_mechanism ` and its `function + ` are distinct from, and should not be confused with the `objective_function + ` parameter of the OptimizationControlMechanism's `function + `. The `objective_mechanism `\\'s + `function ` evaluates the `outcome ` of processing + without taking into account the `costs ` of the OptimizationControlMechanism's + `control_signals `. In contrast, its `evaluate_agent_rep + ` method, which is assigned as the `objective_function` + parameter of its `function `, takes the `costs ` + of the OptimizationControlMechanism's `control_signals ` into + account when calculating the `net_outcome` that it returns as its result. -*Agent Representation* -^^^^^^^^^^^^^^^^^^^^^^ +COMMENT: +ADD HINT HERE RE: USE OF CONCATENATION + +the items specified by `monitor_for_control +` are all assigned `MappingProjections ` to a single +*OUTCOME* InputPort. This is assigned `Concatenate` as it `function `, which concatenates the +`values ` of its Projections into a single array (that is, it is automatically configured +to use the *CONCATENATE* option of a ControlMechanism's `outcome_input_ports_option +` Parameter). This ensures that the input to the +OptimizationControlMechanism's `function ` has the same format as when an +`objective_mechanism ` has been specified, as described below. +COMMENT -The defining feature of an OptimizationControlMechanism is its agent representation, specified in the **agent_rep** -argument of its constructor and assigned to its `agent_rep ` attribute. This -designates a representation of the `Composition` (or parts of it) that the OptimizationControlMechanism controls, and -that it uses to evaluate sample `control_allocations ` in order to find the one -that optimizes the `net_outcome ` when the Composition is fully executed. The `agent_rep -` is always itself a `Composition`, that can be either the same one that the -OptimizationControlMechanism controls (fully `model-based optimization `, -or another one (`model-free optimization `) that is used to estimate -the `net_outcome ` for that Composition (see `above -`). The `evaluate ` method of the -Composition is assigned as the `evaluation_function ` of the -OptimizationControlMechanism. If the `agent_rep ` is not the Composition for -which the OptimizationControlMechanism is the controller, then it must meet the following requirements: - - * Its `evaluate ` method must accept as its first three arguments, in order: - values that correspond in shape to the `state_feature_values - `, - `control_allocation ` and - `num_estimates ` - attributes of the OptimizationControlMechanism, respectively. - .. 
- * If it has an `adapt ` method, that must accept as its first three arguments, in order: - values that correspond to the shape of the `state_feature_values - `, - `control_allocation ` and - `net_outcome ` - attributes of the OptimizationControlMechanism, respectively. +.. _OptimizationControlMechanism_Monitor_for_Control: + +*monitor_for_control* + +If an OptimizationControlMechanism is not assigned an `objective_mechanism `, +then its `outcome_input_ports ` are determined by its +`monitor_for_control ` and `outcome_input_ports_option +` attributes, specified in the corresponding arguments of its +constructor (see `Outcomes arguments `), and the `allow_probes +` attribute of the Composition for which the OptimizationControlMechanism is the +`controller `. The latter allows the values of the items listed in `monitor_for_control +` to be `INPUT ` or `INTERNAL ` `Nodes +` of a `nested Composition ` to be monitored and included in the computation +of `outcome ` (ordinarily, those must be `OUTPUT ` Nodes of a nested +Composition). This can be thought of as providing access to "latent variables" of the Composition being evaluated; +that is, ones that do not contribute directly to the Composition's `results `. This +applies both to items that are monitored directly by the OptimizationControlMechanism or via its ObjectiveMechanism +(see `allow_probes ` above for additional details). + +The value(s) of the specified Components are assigned as the OptimizationControlMechanism's `outcome +` attribute, which is used to compute the `net_outcome ` +of executing its `agent_rep `. .. _OptimizationControlMechanism_Function: @@ -293,42 +586,41 @@ `control_allocation ` that optimizes the `net_outcome ` for the current (or expected) `state `. It is generally an `OptimizationFunction`, which in turn has `objective_function -`, `search_function ` -and `search_termination_function ` methods, as well as a `search_space -` attribute. The OptimizationControlMechanism's `evaluation_function -` is automatically assigned as the -OptimizationFunction's `objective_function `, and is used to -evaluate each `control_allocation ` sampled from the `search_space -` by the `search_function `search_function ` -until the `search_termination_function ` returns `True`. -A custom function can be assigned as the `function ` of an -OptimizationControlMechanism, however it must meet the following requirements: - -.. _OptimizationControlMechanism_Custom_Funtion: +`, `search_function ` and +`search_termination_function ` methods, as well as a `search_space +` attribute. The `objective_function ` +is automatically assigned the OptimizationControlMechanism's `evaluate_agent_rep +` method, that is used to evaluate each `control_allocation +` sampled from the `search_space ` by the +`search_function ` until the `search_termination_function +` returns `True` (see `OptimizationControlMechanism_Execution` +for additional details). + +.. _OptimizationControlMechanism_Custom_Function: + +*Custom Function* +~~~~~~~~~~~~~~~~~ + +A custom function can be assigned as the OptimizationControlMechanism's `function +`, however it must meet the following requirements: - It must accept as its first argument and return as its result an array with the same shape as the OptimizationControlMechanism's `control_allocation `. .. - - It must implement a `reset` method that accepts **objective_function** as a keyword argument and - implements an attribute with the same name. 
- - COMMENT: - - it must implement a `reset` method that accepts as keyword arguments **objective_function**, - **search_function**, **search_termination_function**, and **search_space**, and implement attributes - with corresponding names. - COMMENT - -If **function** argument is not specified, the `GridSearch` `OptimiziationFunction` is assigned as the default, -which evaluates the `net_outcome ` using the OptimizationControlMechanism's -`control_allocation_search_space ` as its -`search_space `, and returns the `control_allocation -` that yields the greatest `net_outcome `, -thus implementing a computation of `EVC `. + - It must execute the OptimizationControlMechanism's `evaluate_agent_rep + ` `num_estimates ` + times, and aggregate the results in computing the `net_outcome ` for a given + `control_allocation ` (see + `OptimizationControlMechanism_Estimation_Randomization` for additional details). + .. + - It must implement a `reset` method that can accept as keyword arguments **objective_function**, + **search_function**, **search_termination_function**, and **search_space**, and implement attributes + with corresponding names. -COMMENT: .. _OptimizationControlMechanism_Search_Functions: *Search Function, Search Space and Search Termination Function* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Subclasses of OptimizationControlMechanism may implement their own `search_function ` and `search_termination_function @@ -339,44 +631,152 @@ OptimizationControlMechanism's constructor, as long as they are compatible with the requirements of the OptimizationFunction and OptimizationControlMechanism. If they are not specified, then defaults specified either by the OptimizationControlMechanism or the OptimizationFunction are used. -COMMENT + +.. _OptimizationControlMechanism_Default_Function: + +*Default Function: GridSearch* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the **function** argument is not specified, the `GridSearch` `OptimizationFunction` is assigned as the default, +which evaluates the `net_outcome ` using the OptimizationControlMechanism's +`control_allocation_search_space ` as its +`search_space `, and returns the `control_allocation +` that yields the greatest `net_outcome `, +thus implementing a computation of `EVC `. + + +.. _OptimizationControlMechanism_Output: + +*Output* +^^^^^^^^ + +The output of OptimizationControlMechanism are its `control_signals ` that implement +the `control_allocations ` it evaluates and optimizes. These their effects are +estimated over variation in the values of Components with random variables, then the OptimizationControlMechanism's +`control_signals ` include an additional *RANDOMIZATION_CONTROL_SIGNAL* that +implements that variablity for the relevant Components, as described below. + +.. _OptimizationControlMechanism_Randomization_Control_Signal: + +*Randomization ControlSignal* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If `num_estimates ` is specified (that is, it is not None), +a `ControlSignal` is automatically added to the OptimizationControlMechanism's `control_signals +`, named *RANDOMIZATION_CONTROL_SIGNAL*, that randomizes +the values of random variables in the `agent_rep ` over estimates of its +`net_outcome `. The `initial_seed ` and +`same_seed_for_all_allocations ` Parameters can also be +used to further refine randomization (see `OptimizationControlMechanism_Estimation_Randomization` for additional +details). + +.. 
technical_note::
+
+ The *RANDOMIZATION_CONTROL_SIGNAL* ControlSignal sends a `ControlProjection` to the `ParameterPort` for the
+ `seed` `Parameter` of Components specified either in the OptimizationControlMechanism's `random_variables
+ ` attribute or that of the `agent_rep
+ ` (see above). The *RANDOMIZATION_CONTROL_SIGNAL* is also included when
+ constructing the `control_allocation_search_space ` passed
+ to the constructor for OptimizationControlMechanism's `function `,
+ as its **search_space** argument, along with the index of the *RANDOMIZATION_CONTROL_SIGNAL* as its
+ **randomization_dimension** argument. .. _OptimizationControlMechanism_Execution: Execution ---------
-When an OptimizationControlMechanism is executed, it carries out the following steps:
+When an OptimizationControlMechanism is executed, the `OptimizationFunction` assigned as its `function
+` is used to evaluate the effects of different `control_allocations
+` to find one that optimizes the `net_outcome `;
+that `control_allocation ` is then used when the Composition controlled by the
+OptimizationControlMechanism is next executed. The OptimizationFunction does this by either simulating performance
+of the Composition or executing the CompositionFunctionApproximator that is its `agent_rep
+`.
- * Calls `adapt` method of its `agent_rep ` to give that a chance to modify
- its parameters in order to better predict the `net_outcome ` for a given `state
- `, based the state and `net_outcome ` of the
- previous trial.
- ..
- * Calls `function ` to find the `control_allocation
- ` that optimizes `net_outcome `. The
- way in which it searches for the best `control_allocation ` is determined by
- the type of `OptimzationFunction` assigned to `function `, whereas the way
- that it evaluates each one is determined by the OptimizationControlMechanism's `evaluation_function
- `. More specifically:
-
- * The `function ` selects a sample `control_allocation
+.. _OptimizationControlMechanism_Optimization_Procedure:
+
+*Optimization Procedure*
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+When an OptimizationControlMechanism is executed, it carries out the following steps to find a `control_allocation
+` that optimizes performance of the Composition that it controls:
+
+ .. _OptimizationControlMechanism_Adaptation:
+
+ * *Adaptation* -- if the `agent_rep ` is a `CompositionFunctionApproximator`,
+ its `adapt ` method is called, allowing it to modify its parameters in order to better
+ predict the `net_outcome ` for a given `state `,
+ based on the state and `net_outcome ` of the previous `TRIAL `.
+
+ .. _OptimizationControlMechanism_Evaluation:
+
+ * *Evaluation* -- the OptimizationControlMechanism's `function ` is
+ called to find the `control_allocation ` that optimizes `net_outcome
+ ` of its `agent_rep ` for the current
+ `state `. The way in which it searches for the best `control_allocation
+ ` is determined by the type of `OptimizationFunction` assigned to `function
+ `, whereas the way that it evaluates each one is determined by the
+ OptimizationControlMechanism's `evaluate_agent_rep ` method.
+ More specifically, it carries out the following procedure:
+
+ .. _OptimizationControlMechanism_Estimation:
+
+ * *Estimation* - the `function ` selects a sample `control_allocation
+ ` (using its `search_function `
- to select one from its `search_space `), and evaluates the predicted
- `net_outcome ` for that `control_allocation
- ` using the OptimizationControlMechanism's `evaluation_function`
- ` and the current `state_feature_values
- `.
- ..
- * It continues to evaluate the `net_outcome ` for `control_allocation
- ` samples until its `search_termination_function
+ to select one from its `search_space `), and evaluates the `net_outcome
+ ` for that `control_allocation `.
+ It does this by calling the OptimizationControlMechanism's `evaluate_agent_rep
+ ` method `num_estimates ` times,
+ each with the current `state_feature_values ` as its input,
+ and executing it for `num_trials_per_estimate ` trials
+ for each estimate. The `control_allocation ` remains fixed for each
+ estimate, but the random seed of any Parameters that rely on randomization is varied, so that the values of those
+ Parameters are randomly sampled for every estimate (see `OptimizationControlMechanism_Estimation_Randomization`).
+
+ * *Aggregation* - the `function `\\'s `aggregation_function
+ ` is used to aggregate the `net_outcome
+ ` over all the estimates for a given `control_allocation
+ `, and the aggregated value is returned as the `outcome
+ ` and used to compute the `net_outcome `
+ for that `control_allocation `.
+
+ * *Termination* - the `function ` continues to evaluate samples of
+ `control_allocations ` provided by its `search_function
+ ` until its `search_termination_function ` returns `True`.
- ..
- * Finally, it implements the `control_allocation ` that yielded the optimal
- `net_outcome `. This is used by the OptimizationControlMechanism's `control_signals
- ` to compute their `values ` which, in turn, are used by
- their `ControlProjections ` to modulate the parameters they control when the Composition is
- next executed.
+
+ .. _OptimizationControlMechanism_Control_Assignment:
+
+ * *Assignment* - when the search completes, the `function `
+ assigns the `control_allocation ` that yielded the optimal value of
+ `net_outcome ` to the OptimizationControlMechanism's `control_signals `,
+ that compute their `values ` which, in turn, are assigned to their `ControlProjections
+ ` to `modulate the Parameters ` they control when the
+ Composition is next executed.
+
+.. _OptimizationControlMechanism_Estimation_Randomization:
+
+*Randomization of Estimation*
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If `num_estimates ` is specified (i.e., it is not None), then each
+`control_allocation ` is independently evaluated `num_estimates
+` times (i.e., by that number of calls to the
+OptimizationControlMechanism's `evaluate_agent_rep ` method).
+The values of Components listed in the OptimizationControlMechanism's `random_variables
+` attribute are randomized over those estimates. By default,
+this includes all Components in the `agent_rep ` with random variables (listed
+in its `random_variables ` attribute). However, if particular Components are specified
+in the **random_variables** argument of the OptimizationControlMechanism's constructor, then randomization is
+restricted to their values. Randomization over estimates can be further configured using the `initial_seed
+` and `same_seed_for_all_allocations
+` attributes. The results of all the estimates for a given
+`control_allocation ` are aggregated by the `aggregation_function
+` of the `OptimizationFunction` assigned to the
+OptimizationControlMechanism's `function `, and used to compute the `net_outcome
+` over the estimates for that `control_allocation `
+(see `OptimizationControlMechanism_Execution` for additional details). COMMENT: ..
_OptimizationControlMechanism_Examples: @@ -384,7 +784,7 @@ Examples -------- -The table below lists `model-free ` and `model-based +The table below lists `model-free ` and `model-based ` that adjusts its `ControlSignals ` to optimize - performance of the `Composition` to which it belongs. See parent class for additional arguments. + performance of the `Composition` to which it belongs. See `ControlMechanism ` + for arguments not described here. Arguments --------- - state_features : Mechanism, OutputPort, Projection, dict, or list containing any of these - specifies Components, the values of which are assigned to `state_feature_values + state_features : Mechanism, InputPort, OutputPort, Projection, dict, or list containing any of these + specifies Components for which `state_input_ports ` + are created, the `values ` of which are assigned to `state_feature_values ` and used to predict `net_outcome `. Any `InputPort specification ` - can be used that resolves to an `OutputPort` that projects to that InputPort. + can be used that resolves to an `OutputPort` that projects to that InputPort (see + `state_features ` for additional details>). - state_feature_function : Function or function : default None - specifies the `function ` for the `InputPort` assigned to each `state_feature_value - `. + state_feature_functions : Function or function : default None + specifies the `function ` assigned the `InputPort` in `state_input_ports + ` assigned to each **state_feature** + (see `state_feature_functions ` for additional details). - agent_rep : None : default Composition to which the OptimizationControlMechanism is assigned - specifies the `Composition` used by the `evalution_function ` + agent_rep : None or Composition : default None or Composition to which OptimizationControlMechanism is assigned + specifies the `Composition` used by `evaluate_agent_rep ` to predict the `net_outcome ` for a given `state `. If a Composition is specified, it must be suitably configured - (see `above ` for additional details). If it is not specified, the - OptimizationControlMechanism is placed in `deferred_init` status until it is assigned as the `controller - ` of a Composition, at which that Composition is assigned as the `agent_rep - ` for additional details). It can also be a + `CompositionFunctionApproximator`, or subclass of one, used for `model-free + ` optimization. If **agent_rep** is not specified, the + OptimizationControlMechanism is placed in `deferred_init ` status until it is assigned + as the `controller ` of a Composition, at which time that Composition is assigned as + the `agent_rep `. + + num_estimates : int : 1 + specifies the number independent runs of `agent_rep ` used + to estimate its `net_outcome ` for each `control_allocation + ` sampled (see `num_estimates + ` for additional information). + + random_variables : Parameter or list[Parameter] : default ALL + specifies the Components with random variables to be randomized over different estimates + of each `control_allocation `; these must be in the `agent_rep + ` and have a `seed` `Parameter`. By default, all such Components in + the `agent_rep ` (listed in its `random_variables + ` attribute) are included (see `random_variables + ` for additional information). + + initial_seed : int : default None + specifies the seed used to initialize the random number generator at construction. + If it is not specified then then the seed is set to a random value (see `initial_seed + ` for additional information). 
+ + same_seed_for_all_parameter_combinations : bool : default False + specifies whether the random number generator is re-initialized to the same value when estimating each + `control_allocation ` (see `same_seed_for_all_parameter_combinations + ` for additional information). + + num_trials_per_estimate : int : default None + specifies the number of trials to execute in each run of `agent_rep + ` by a call to `evaluate_agent_rep + ` (see `num_trials_per_estimate + ` for additional information). search_function : function or method specifies the function assigned to `function ` as its `search_function ` parameter, unless that is specified in a constructor for `function `. It must take as its arguments an array with the same shape as `control_allocation ` and an integer - (indicating the iteration of the `optimization process `), and return + (indicating the iteration of the `optimization process `), and return an array with the same shape as `control_allocation `. search_termination_function : function or method @@ -539,7 +984,7 @@ class OptimizationControlMechanism(ControlMechanism): arguments an array with the same shape as `control_allocation ` and two integers (the first representing the `net_outcome ` for the current `control_allocation `, and the second the current iteration of the - `optimization process `); it must return `True` or `False`. + `optimization process `); it must return `True` or `False`. search_space : iterable [list, tuple, ndarray, SampleSpec, or SampleIterator] | list, tuple, ndarray, SampleSpec, or SampleIterator specifies the `search_space ` parameter for `function @@ -558,56 +1003,151 @@ class OptimizationControlMechanism(ControlMechanism): Attributes ---------- - state_feature_values : 2d array - the current value of each item of the OptimizationControlMechanism's `state_features - ` (each of which is a 1d array). - agent_rep : Composition - determines the `Composition` used by the `evalution_function ` - to predict the `net_outcome ` for a given `state - ` (see `above `for additional - details). + determines the `Composition` used by the `evaluate_agent_rep ` + method to predict the `net_outcome ` for a given `state + `; see `Agent Representation ` + for additional details. + + agent_rep_type : None, COMPOSITION or COMPOSITION_FUNCTION_APPROXIMATOR + identifies whether the agent_rep is a `Composition`, a `CompositionFunctionApproximator` or + one of its subclasses, or it has not been assigned (None); see `Agent Representation and Types + of Optimization ` for additional details. + + state_feature_values : 2d array + the current value of each item of the OptimizationControlMechanism's + `OptimizationControlMechanism_State_Features` (each of which is a 1d array). + + state_input_ports : ContentAddressableList + lists the OptimizationControlMechanism's `InputPorts ` that receive `Projections ` + from the items specified in the **state_features** argument in the OptimizationControlMechanism's constructor + or constructed automatically (see `state_features `), and + that provide the `state_feature_values ` to the `agent_rep + ` (see `OptimizationControlMechanism_State_Features` for additional details). + + num_state_input_ports : int + cantains the number of `state_input_ports `. 
+ + outcome_input_ports : ContentAddressableList + lists the OptimizationControlMechanism's `OutputPorts ` that receive `Projections ` + from either its `objective_mechanism ` or the Components listed in + its `monitor_for_control ` attribute, the values of which are used + to compute the `net_outcome ` of executing the `agent_rep + ` in a given `OptimizationControlMechanism_State` + (see `Outcome ` for additional details). + + num_estimates : int + determines the number independent runs of `agent_rep ` (i.e., calls to + `evaluate_agent_rep `) used to estimate the `net_outcome + ` of each `control_allocation ` evaluated + by the OptimizationControlMechanism's `function ` (i.e., + that are specified by its `search_space `); see + `OptimizationControlMechanism_Estimation_Randomization` for additional details. + + random_variables : Parameter or List[Parameter] + list of the Components with variables that are randomized over estimates for a given `control_allocation + `; by default, all Components in the `agent_rep + ` with random variables are included (listed in its `random_variables + ` attribute); see `OptimizationControlMechanism_Estimation_Randomization` + for additional details. + + initial_seed : int or None + determines the seed used to initialize the random number generator at construction. + If it is not specified then then the seed is set to a random value, and different runs of a + Composition containing the OptimizationControlMechanism will yield different results, which should be roughly + comparable if the estimation process is stable. If **initial_seed** is specified, then running the Composition + should yield identical results for the estimation process, which can be useful for debugging. + + same_seed_for_all_allocations : bool + determines whether the random number generator used to select seeds for each estimate of the `agent_rep + `\\'s `net_outcome ` is re-initialized + to the same value for each `control_allocation ` evaluated. + If same_seed_for_all_allocations is True, then any differences in the estimates made of `net_outcome + ` for each `control_allocation ` will + reflect exclusively the influence of the different control_allocations on the execution of the `agent_rep + `, and *not* any variability intrinsic to the execution of + the Composition itself (e.g., any of its Components). This can be confirmed by identical results for repeated + executions of the OptimizationControlMechanism's `evaluate_agent_rep + ` method for the same `control_allocation + `. If same_seed_for_all_allocations is False, then each time a + `control_allocation ` is estimated, it will use a different set of seeds. + This can be confirmed by differing results for repeated executions of the OptimizationControlMechanism's + `evaluate_agent_rep ` method with the same `control_allocation + `). Small differences in results suggest + stability of the estimation process across `control_allocations `, while + substantial differences indicate instability, which may be helped by increasing `num_estimates + `. + + num_trials_per_estimate : int or None + imposes an exact number of trials to execute in each run of `agent_rep ` + used to evaluate its `net_outcome ` by a call to the + OptimizationControlMechanism's `evaluate_agent_rep ` method. + If it is None (the default), then either the number of **inputs** or the value specified for **num_trials** in + the Composition's `run ` method used to determine the number of trials executed (see + `number of trials ` for additional information). 
function : OptimizationFunction, function or method takes current `control_allocation ` (as initializer), uses its `search_function ` to select samples of `control_allocation ` from its `search_space `,
- evaluates these using its `evaluation_function `, and returns
- the one that yields the optimal `net_outcome ` (see `Function
- ` for additional details).
-
- evaluation_function : function or method
- returns `net_outcome ` for a given `state `
- (i.e., combination of `state_feature_values ` and
- `control_allocation `. It is assigned as the `objective_function
+ evaluates these using its `evaluate_agent_rep ` method by
+ calling it `num_estimates ` times to estimate its `net_outcome
+ ` for a given `control_allocation
+ `, and returns the one that yields the optimal `net_outcome
+ ` (see `Function ` for additional details).
+
+ evaluate_agent_rep : function or method
+ returns the `net_outcome(s) ` for a given `state
+ ` (i.e., combination of `state_feature_values
+ ` and `control_allocation
+ `). It is assigned as the `objective_function ` parameter of `function `, and calls the `evaluate` method of the OptimizationControlMechanism's
- `agent_rep ` with a `control_allocation
- `, the OptimizationControlMechanism's `num_estimates
- ` attribute, and the current `state_feature_values
- `.
+ `agent_rep ` with the current `state_feature_values
+ ` and a specified `control_allocation
+ `, which runs the `agent_rep
+ ` for `num_trials_per_estimate
+ ` trials. It returns an array containing the
+ `net_outcome ` of the run and, if the **return_results** argument is True,
+ an array containing the `results ` of the run. This method is called `num_estimates
+ ` times by the OptimizationControlMechanism's `function
+ `, which aggregates the `net_outcome `
+ over those estimates in evaluating a given `control_allocation `
+ (see `OptimizationControlMechanism_Function` for additional details).
- COMMENT: search_function : function or method `search_function ` assigned to `function `; used to select samples of `control_allocation
- ` to evaluate by `evaluation_function
- `.
+ ` to evaluate by `evaluate_agent_rep
+ `. search_termination_function : function or method `search_termination_function ` assigned to `function `; determines when to terminate the
- `optimization process `.
- COMMENT
+ `optimization process `.
+
+ control_signals : ContentAddressableList[ControlSignal]
+ list of the OptimizationControlMechanism's `ControlSignals ` for the Parameters it optimizes,
+ including any inherited from the `Composition` for which it is
+ the `controller ` (this is the same as ControlMechanism's `output_ports
+ ` attribute). Each sends a `ControlProjection` to the `ParameterPort` for the
+ Parameter it controls when evaluating a `control_allocation `. If
+ `num_estimates ` is specified (that is, it is not None), a
+ `ControlSignal` is added to control_signals, named *RANDOMIZATION_CONTROL_SIGNAL*, that is used to randomize
+ estimates of `outcome ` for a given `control_allocation
+ ` (see `OptimizationControlMechanism_Estimation_Randomization` for
+ details). control_allocation_search_space : list of SampleIterators
- `search_space ` assigned by default to `function
- `, that determines the samples of
- `control_allocation ` evaluated by the `evaluation_function
- `.
This is a proprety that, unless overridden, - returns a list of the `SampleIterators ` generated from the `allocation_sample - ` specifications for each of the OptimizationControlMechanism's - `control_signals `. + `search_space ` assigned by default to the + OptimizationControlMechanism's `function `, that determines the + samples of `control_allocation ` evaluated by the `evaluate_agent_rep + ` method. This is a property that, unless overridden, + returns a list of the `SampleIterators ` generated from the `allocation_samples + ` specifications for each of the OptimizationControlMechanism's + `control_signals `, and includes the + *RANDOMIZATION_CONTROL_SIGNAL* used to randomize estimates of each `control_allocation + ` (see `note ` above). saved_samples : list contains all values of `control_allocation ` sampled by `function @@ -621,12 +1161,12 @@ class OptimizationControlMechanism(ControlMechanism): is `True`; otherwise list is empty. search_statefulness : bool : True - if set to False, an `OptimizationControlMechanism`\\ 's `evaluation_function` will never run simulations; the - evaluations will simply execute in the original `execution context <_Execution_Contexts>`. - - if set to True, `simulations ` will be created normally for each - `control allocation `. - + if True (the default), calls to `evaluate_agent_rep ` + by the OptimizationControlMechanism's `function ` for each + `control_allocation ` will run as simulations in their own + `execution contexts `. If *search_statefulness* is False, calls for each + `control_allocation ` will not be executed as independent simulations; + rather, all will be run in the same (original) execution context. """ componentType = OPTIMIZATION_CONTROL_MECHANISM @@ -665,8 +1205,8 @@ class Parameters(ControlMechanism.Parameters): :default value: None :type: - state_feature_function - see `state_feature_function ` + state_feature_functions + see `state_feature_functions ` :default value: None :type: @@ -678,7 +1218,7 @@ class Parameters(ControlMechanism.Parameters): :type: input_ports - see `input_ports ` + see `input_ports ` :default value: ["{name: OUTCOME, params: {internal_only: True}}"] :type: ``list`` @@ -690,6 +1230,12 @@ class Parameters(ControlMechanism.Parameters): :default value: None :type: + num_trials_per_estimate + see `num_trials_per_estimate ` + + :default value: None + :type: + saved_samples see `saved_samples ` @@ -720,8 +1266,9 @@ class Parameters(ControlMechanism.Parameters): :default value: None :type: """ + outcome_input_ports_option = Parameter(CONCATENATE, stateful=False, loggable=False, structural=True) function = Parameter(GridSearch, stateful=False, loggable=False) - state_feature_function = Parameter(None, reference=True, stateful=False, loggable=False) + state_feature_functions = Parameter(None, reference=True, stateful=False, loggable=False) search_function = Parameter(None, stateful=False, loggable=False) search_space = Parameter(None, read_only=True) search_termination_function = Parameter(None, stateful=False, loggable=False) @@ -730,23 +1277,25 @@ class Parameters(ControlMechanism.Parameters): agent_rep = Parameter(None, stateful=False, loggable=False, pnl_internal=True, structural=True) - state_feature_values = Parameter(_parse_state_feature_values_from_variable([defaultControlAllocation]), + # FIX: NEED TO MODIFY IF OUTCOME InputPorts ARE MOVED (CHANGE 1 to 0? 
IF STATE_INPUT_PORTS ARE FIRST) + state_feature_values = Parameter(_parse_state_feature_values_from_variable(1, [defaultControlAllocation]), user=False, pnl_internal=True) - input_ports = Parameter( - [{NAME: OUTCOME, PARAMS: {INTERNAL_ONLY: True}}], - stateful=False, - loggable=False, - read_only=True, - structural=True, - parse_spec=True, - aliases='state_features', - constructor_argument='state_features' - ) + # FIX: Should any of these be stateful? + random_variables = ALL + initial_seed = None + same_seed_for_all_allocations = False num_estimates = None + num_trials_per_estimate = None + # search_space = None - control_allocation_search_space = Parameter(None, read_only=True, getter=_control_allocation_search_space_getter) + control_allocation_search_space = Parameter( + None, + read_only=True, + getter=_control_allocation_search_space_getter, + dependencies='search_space' + ) saved_samples = None saved_values = None @@ -755,10 +1304,14 @@ class Parameters(ControlMechanism.Parameters): @tc.typecheck def __init__(self, agent_rep=None, - function=None, state_features: tc.optional(tc.optional(tc.any(Iterable, Mechanism, OutputPort, InputPort))) = None, - state_feature_function: tc.optional(tc.optional(tc.any(is_function_type))) = None, + state_feature_functions: tc.optional(tc.optional(tc.any(dict, is_function_type))) = None, + function=None, num_estimates = None, + random_variables = None, + initial_seed=None, + same_seed_for_all_allocations=None, + num_trials_per_estimate = None, search_function: tc.optional(tc.optional(tc.any(is_function_type))) = None, search_termination_function: tc.optional(tc.optional(tc.any(is_function_type))) = None, search_statefulness=None, @@ -780,17 +1333,18 @@ def __init__(self, kwargs.pop('features') continue if k == 'feature_function': - if state_feature_function: - warnings.warn(f"Both 'feature_function' and 'state_feature_function' were specified in the " + if state_feature_functions: + warnings.warn(f"Both 'feature_function' and 'state_feature_functions' were specified in the " f"constructor for an {self.__class__.__name__}. 
Note: 'feature_function' has been " - f"deprecated; 'state_feature_function' ({state_feature_function}) will be used.") + f"deprecated; 'state_feature_functions' ({state_feature_functions}) will be used.") else: warnings.warn(f"'feature_function' was specified in the constructor for an" f"{self.__class__.__name__}; Note: 'feature_function' has been deprecated; " - f"please use 'state_feature_function' in the future.") - state_feature_function = kwargs['feature_function'] + f"please use 'state_feature_functions' in the future.") + state_feature_functions = kwargs['feature_function'] kwargs.pop('feature_function') continue + self.state_features = convert_to_list(state_features) function = function or GridSearch @@ -813,10 +1367,12 @@ def __init__(self, super().__init__( function=function, - input_ports=state_features, - state_features=state_features, - state_feature_function=state_feature_function, + state_feature_functions=state_feature_functions, num_estimates=num_estimates, + num_trials_per_estimate = num_trials_per_estimate, + random_variables=random_variables, + initial_seed=initial_seed, + same_seed_for_all_allocations=same_seed_for_all_allocations, search_statefulness=search_statefulness, search_function=search_function, search_termination_function=search_termination_function, @@ -831,52 +1387,307 @@ def _validate_params(self, request_set, target_set=None, context=None): from psyneulink.core.compositions.composition import Composition if request_set[AGENT_REP] is None: - raise OptimizationControlMechanismError(f"The {repr(AGENT_REP)} arg of an {self.__class__.__name__} must " + raise OptimizationControlMechanismError(f"The '{AGENT_REP}' arg of an {self.__class__.__name__} must " f"be specified and be a {Composition.__name__}") elif not (isinstance(request_set[AGENT_REP], Composition) or (isinstance(request_set[AGENT_REP], type) and issubclass(request_set[AGENT_REP], Composition))): - raise OptimizationControlMechanismError(f"The {repr(AGENT_REP)} arg of an {self.__class__.__name__} " + raise OptimizationControlMechanismError(f"The '{AGENT_REP}' arg of an {self.__class__.__name__} " f"must be either a {Composition.__name__} or a sublcass of one") + elif request_set[STATE_FEATURE_FUNCTIONS]: + state_feats = request_set.pop(STATE_FEATURES, None) + state_feat_fcts = request_set.pop(STATE_FEATURE_FUNCTIONS, None) + # If no or only one item is specified in state_features, only one state_function is allowed + if ((not state_feats or len(convert_to_list(state_feats))==1) + and len(convert_to_list(state_feat_fcts))!=1): + raise OptimizationControlMechanismError(f"Only one function is allowed to be specified for " + f"the '{STATE_FEATURE_FUNCTIONS}' arg of {self.name} " + f"if either no only one items is specified for its " + f"'{STATE_FEATURES}' arg.") + if len(convert_to_list(state_feat_fcts))>1 and not isinstance(state_feat_fcts, dict): + raise OptimizationControlMechanismError(f"The '{STATE_FEATURES}' arg of {self.name} contains more " + f"than one item, so its '{STATE_FEATURE_FUNCTIONS}' arg " + f"must be either only a single function (applied to all " + f"{STATE_FEATURES}) or a dict with entries of the form " + f":.") + if len(convert_to_list(state_feat_fcts))>1: + invalid_fct_specs = [fct_spec for fct_spec in state_feat_fcts if fct_spec not in state_feats] + if invalid_fct_specs: + raise OptimizationControlMechanismError(f"The following entries of the dict specified for " + f"'{STATE_FEATURE_FUNCTIONS} of {self.name} have keys that " + f"do not match any InputPorts specified in its " + 
f"{STATE_FEATURES} arg: {invalid_fct_specs}.") + + + if self.random_variables is not ALL: + # invalid_params = [param.name for param in self.random_variables + # if param not in [r._owner._owner for r in self.agent_rep.random_variables]] + # if invalid_params: + # raise OptimizationControlMechanismError(f"The following Parameters were specified for the " + # f"{RANDOM_VARIABLES} arg of {self.name} that are do randomizable " + # f"(i.e., they do not have a 'seed' attribute: " + # f"{invalid_params}.") + invalid_params = [param.name for param in self.random_variables + if param not in self.agent_rep.random_variables] + if invalid_params: + raise OptimizationControlMechanismError(f"The following Parameters were specified for the " + f"{RANDOM_VARIABLES} arg of {self.name} that are do randomizable " + f"(i.e., they do not have a 'seed' attribute: " + f"{invalid_params}.") + + # FIX: CONSIDER GETTING RID OF THIS METHOD ENTIRELY, AND LETTING state_input_ports + # BE HANDLED ENTIRELY BY _update_state_input_ports_for_controller def _instantiate_input_ports(self, context=None): - """Instantiate input_ports for Projections from state_features and objective_mechanism. - - Inserts InputPort specification for Projection from ObjectiveMechanism as first item in list of - InputPort specifications generated in _parse_state_feature_specs from the **state_features** and - **state_feature_function** arguments of the OptimizationControlMechanism constructor. + """Instantiate InputPorts for state_features (with state_feature_functions if specified). + + This instantiates the OptimizationControlMechanism's `state_input_ports; + these are used to provide input to the agent_rep when its evaluate method is called + (see Composition._build_predicted_inputs_dict). + The OptimizationCOntrolMechanism's outcome_input_ports are instantiated by + ControlMechanism._instantiate_input_ports in the call to super(). + + InputPorts are constructed for **state_features** by calling _parse_state_feature_specs + with them and **state_feature_functions** arguments of the OptimizationControlMechanism constructor. + The constructed state_input_ports are passed to ControlMechanism_instantiate_input_ports(), + which appends them to the InputPort(s) that receive input from the **objective_mechanism* (if specified) + or **monitor_for_control** ports (if **objective_mechanism** is not specified). + Also ensures that: + - every state_input_port has only a single Projection; + - every outcome_input_ports receive Projections from within the agent_rep if it is a Composition. + + If no **state_features** are specified in the constructor, assign ones for INPUT Nodes of owner. + - warn for model-free `model-free optimization `. + - ignore here for `model-based optimization ` + (handled in _update_state_input_ports_for_controller) + + See `state_features ` and + `OptimizationControlMechanism_State_Features` for additional details. 
""" - # Specify *OUTCOME* InputPort; receives Projection from *OUTCOME* OutputPort of objective_mechanism - outcome_input_port = {NAME:OUTCOME, PARAMS:{INTERNAL_ONLY:True}} - - # If any state_features were specified (assigned to self.input_ports in __init__): - if self.input_ports: - input_ports = _parse_shadow_inputs(self, self.input_ports) - input_ports = self._parse_state_feature_specs(input_ports, self.state_feature_function) - # Insert primary InputPort for outcome from ObjectiveMechanism; - # assumes this will be a single scalar value and must be named OUTCOME by convention of ControlSignal - input_ports.insert(0, outcome_input_port), - else: - input_ports = [outcome_input_port] - - self.parameters.input_ports._set(input_ports, context) + # If any state_features were specified parse them and pass to ControlMechanism._instantiate_input_ports() + state_input_ports_specs = None - # Configure default_variable to comport with full set of input_ports - self.defaults.variable, _ = self._handle_arg_input_ports(self.input_ports) + # FIX: 11/3/21 : + # ADD CHECK IN _parse_state_feature_specs THAT IF A NODE RATHER THAN InputPort IS SPECIFIED, + # ITS PRIMARY IS USED (SEE SCRATCH PAD FOR EXAMPLES) + if not self.state_features: + # For model-free (agent_rep = CompositionFunctionApproximator), warn if no state_features specified. + # Note: for model-based optimization, state_input_ports and any state_feature_functions specified + # are assigned in _update_state_input_ports_for_controller. + if self.agent_rep_type == COMPOSITION_FUNCTION_APPROXIMATOR: + warnings.warn(f"No 'state_features' specified for use with `agent_rep' of {self.name}") - super()._instantiate_input_ports(context=context) - - for i in range(1, len(self.input_ports)): + else: + # FIX: 11/29/21: DISALLOW FOR COMPOSITION + # Implement any specified state_features + state_input_ports_specs = self._parse_state_feature_specs(self.state_features, + self.state_feature_functions) + # Note: + # if state_features were specified for model-free (i.e., agent_rep is a CompositionFunctionApproximator), + # assume they are OK (no way to check their validity for agent_rep.evaluate() method, and skip assignment + + # Pass state_input_ports_sepcs to ControlMechanism for instantiation and addition to OCM's input_ports + super()._instantiate_input_ports(state_input_ports_specs, context=context) + + # Assign to self.state_input_ports attribute + start = self.num_outcome_input_ports # FIX: 11/3/21 NEED TO MODIFY IF OUTCOME InputPorts ARE MOVED + stop = start + len(state_input_ports_specs) if state_input_ports_specs else 0 + # FIX 11/3/21: THIS SHOULD BE MADE A PARAMETER + self.state_input_ports = ContentAddressableList(component_type=InputPort, + list=self.input_ports[start:stop]) + + # Ensure that every state_input_port has no more than one afferent projection + # FIX: NEED TO MODIFY IF OUTCOME InputPorts ARE MOVED + for i in range(self.num_outcome_input_ports, self.num_state_input_ports): port = self.input_ports[i] if len(port.path_afferents) > 1: raise OptimizationControlMechanismError(f"Invalid {type(port).__name__} on {self.name}. 
" f"{port.name} should receive exactly one projection, " f"but it receives {len(port.path_afferents)} projections.") + def _validate_monitor_for_control(self, nodes): + # Ensure all of the Components being monitored for control are in the agent_rep if it is Composition + if self.agent_rep_type == COMPOSITION: + try: + super()._validate_monitor_for_control(self.agent_rep._get_all_nodes()) + except ControlMechanismError as e: + raise OptimizationControlMechanismError(f"{self.name} has 'outcome_ouput_ports' that receive " + f"Projections from the following Components that do not " + f"belong to its {AGENT_REP} ({self.agent_rep.name}): {e.data}.") + + # FIX: 12/9/21 -- DEPRECATE DIRECT PROJECTIONS FROM PROBES, ELIMINATING THE NEED FOR THIS OVERRIDE + # def _parse_monitor_for_control_input_ports(self, context): + # """Override ControlMechanism to implement allow_probes=DIRECT option + # + # If is False (default), simply pass results of super()._parse_monitor_for_control_input_ports(context); + # this is restricted to the use of OUTPUT Nodes in nested Compositions, and routes Projections from nodes in + # nested Compositions through their respective output_CIMs. + # + # If allow_probes option is True, any INTERNAL Nodes of nested Compositions specified in monitor_for_control + # are assigned NodeRole.OUTPUT, and Projections from them to the OptimizationControlMechanism are routed + # from the nested Composition(s) through the respective output_CIM(s). + # + # If allow_probes option is DIRECT, Projection specifications are added to Port specification dictionaries, + # so that the call to super()._instantiate_input_ports in ControlMechanism instantiates Projections from + # monitored node to OptimizationControlMechanism. This allows *direct* Projections from monitored nodes in + # nested Compositions to the OptimizationControlMechanism, bypassing output_CIMs and preventing inclusion + # of their values in the results attribute of those Compositions. 
+ # + # Return port specification dictionaries (*with* Projection specifications), their value sizes and null list + # (to suppress Projection assignment to aux_components in ControlMechanism._instantiate_input_ports) + # """ + # + # outcome_input_port_specs, outcome_value_sizes, monitored_ports \ + # = super()._parse_monitor_for_control_input_ports(context) + # + # if self.allow_probes == DIRECT: + # # Add Projection specifications to port specification dictionaries for outcome_input_ports + # # and return monitored_ports = [] + # + # if self.outcome_input_ports_option == SEPARATE: + # # Add port spec to to each outcome_input_port_spec (so that a Projection is specified directly to each) + # for i in range(self.num_outcome_input_ports): + # outcome_input_port_specs[i].update({PROJECTIONS: monitored_ports[i]}) + # else: + # # Add all ports specs as list to single outcome_input_port + # outcome_input_port_specs[0].update({PROJECTIONS: monitored_ports}) + # + # # Return [] for ports to suppress creation of Projections in _instantiate_input_ports + # monitored_ports = [] + # + # return outcome_input_port_specs, outcome_value_sizes, monitored_ports + + def _update_state_input_ports_for_controller(self, context=None): + """Check and update state_input_ports for model-based optimization (agent_rep==Composition) + + If no agent_rep has been specified or it is model-free, return + (note: validation of state_features specified for model-free optimization is up to the + CompositionFunctionApproximator) + + For model-based optimization (agent_rep is a Composition): + + - ensure that state_input_ports for all specified state_features are for InputPorts of INPUT Nodes of agent_rep; + raises an error if any receive a Projection that is not a shadow Projection from an INPUT Node of agent_rep + (note: there should already be state_input_ports for any **state_features** specified in the constructor). + + - if no state_features specified, assign a state_input_port for every InputPort of every INPUT Node of agent_rep + (note: shadow Projections for all state_input_ports are created in Composition._update_shadow_projections()). + + - assign state_feature_functions to relevant state_input_ports (same function for all if no state_features + are specified or only one state_function is specified; otherwise, use dict for specifications). + """ + + # FIX: 11/15/21 - REPLACE WITH ContextFlags.PROCESSING ?? + # TRY TESTS WITHOUT THIS + # Don't instantiate unless being called by Composition.run() (which does not use ContextFlags.METHOD) + # This avoids error messages if called prematurely (i.e., before run is complete) + # MODIFIED 11/29/21 OLD: + if context.flags & ContextFlags.METHOD: + return + # MODIFIED 11/29/21 END + + # Don't bother for model-free optimization (see OptimizationControlMechanism_Model_Free) + # since state_input_ports specified or model-free optimization are entirely the user's responsibility; + # this is because they can't be programmatically validated against the agent_rep's evaluate() method. + # (This contrast with model-based optimization, for which there must be a state_input_port for every + # InputPort of every INPUT node of the agent_rep (see OptimizationControlMechanism_Model_Based). + if self.agent_rep_type != COMPOSITION: + return + + from psyneulink.core.compositions.composition import Composition, NodeRole, CompositionInterfaceMechanism + + def _get_all_input_nodes(comp): + """Return all input_nodes, including those for any Composition nested one level down. 
+ Note: more deeply nested Compositions will either be served by their containing one(s) or own controllers + """ + _input_nodes = comp.get_nodes_by_role(NodeRole.INPUT) + input_nodes = [] + for node in _input_nodes: + if isinstance(node, Composition): + input_nodes.extend(_get_all_input_nodes(node)) + else: + input_nodes.append(node) + return input_nodes + + if self.state_features: + # FIX: 11/26/21 - EXPLAIN THIS BEHAVIOR IN DOSCSTRING; + warnings.warn(f"The 'state_features' argument has been specified for {self.name}, that is being " + f"configured as a model-based {self.__class__.__name__} (i.e, one that uses a " + f"{Composition.componentType} as its agent_rep). This overrides automatic assignment of " + f"all inputs to its agent_rep ({self.agent_rep.name}) as the 'state_features'; only the " + f"ones specified will be used ({self.state_features}), and they must match the shape of the " + f"input to {self.agent_rep.name} when it is run. Remove this specification from the " + f"constructor for {self.name} if automatic assignment is preferred.") + + comp = self.agent_rep + # Ensure that all InputPorts shadowed by specified state_input_ports + # are in agent_rep or one of its nested Compositions + invalid_state_features = [input_port for input_port in self.state_input_ports + if (not (input_port.shadow_inputs.owner in + list(comp.nodes) + [n[0] for n in comp._get_nested_nodes()]) + and (not [input_port.shadow_inputs.owner.composition is x for x in + comp._get_nested_compositions() + if isinstance(input_port.shadow_inputs.owner, + CompositionInterfaceMechanism)]))] + if any(invalid_state_features): + raise OptimizationControlMechanismError(f"{self.name}, being used as controller for model-based " + f"optimization of {self.agent_rep.name}, has 'state_features' " + f"specified ({[d.name for d in invalid_state_features]}) that " + f"are missing from the Composition or any nested within it.") + + # Ensure that all InputPorts shadowed by specified state_input_ports + # reference INPUT Nodes of agent_rep or of a nested Composition + invalid_state_features = [input_port for input_port in self.state_input_ports + if (not (input_port.shadow_inputs.owner in _get_all_input_nodes(self.agent_rep)) + and (isinstance(input_port.shadow_inputs.owner, + CompositionInterfaceMechanism) + and not (input_port.shadow_inputs.owner.composition in + [nested_comp for nested_comp in comp._get_nested_compositions() + if nested_comp in comp.get_nodes_by_role(NodeRole.INPUT)])))] + if any(invalid_state_features): + raise OptimizationControlMechanismError(f"{self.name}, being used as controller for model-based " + f"optimization of {self.agent_rep.name}, has 'state_features' " + f"specified ({[d.name for d in invalid_state_features]}) that " + f"are not INPUT nodes for the Composition or any nested " + f"within it.") + return + + # Model-based agent_rep, but no state_features have been specified, + # so assign a state_input_port to shadow every InputPort of every INPUT node of agent_rep + shadow_input_ports = [] + for node in _get_all_input_nodes(self.agent_rep): + for input_port in node.input_ports: + if input_port.internal_only: + continue + # if isinstance(input_port.owner, CompositionInterfaceMechanism): + # input_port = input_port. 
+ shadow_input_ports.append(input_port) + + local_context = Context(source=ContextFlags.METHOD) + state_input_ports_to_add = [] + # for input_port in input_ports_not_specified: + for input_port in shadow_input_ports: + input_port_name = f"{SHADOW_INPUT_NAME} of {input_port.owner.name}[{input_port.name}]" + params = {SHADOW_INPUTS: input_port, + INTERNAL_ONLY:True} + # Note: state_feature_functions has been validated _validate_params + # to have only a single function in for model-based agent_rep + if self.state_feature_functions: + params.update({FUNCTION: self._parse_state_feature_function(self.state_feature_functions)}) + state_input_ports_to_add.append(_instantiate_port(name=input_port_name, + port_type=InputPort, + owner=self, + reference_value=input_port.value, + params=params, + context=local_context)) + self.add_ports(state_input_ports_to_add, + update_variable=False, + context=local_context) + self.state_input_ports.extend(state_input_ports_to_add) + def _instantiate_output_ports(self, context=None): """Assign CostFunctions.DEFAULTS as default for cost_option of ControlSignals. - OptimizationControlMechanism requires use of at least one of the cost options """ super()._instantiate_output_ports(context) @@ -887,16 +1698,47 @@ def _instantiate_output_ports(self, context=None): def _instantiate_control_signals(self, context): """Size control_allocation and assign modulatory_signals - Set size of control_allocadtion equal to number of modulatory_signals. + Set size of control_allocation equal to number of modulatory_signals. Assign each modulatory_signal sequentially to corresponding item of control_allocation. + Assign RANDOMIZATION_CONTROL_SIGNAL for random_variables """ - from psyneulink.core.globals.keywords import OWNER_VALUE - output_port_specs = list(enumerate(self.output_ports)) - for i, spec in output_port_specs: + + # MODIFIED 11/21/21 NEW: + # FIX - PURPOSE OF THE FOLLOWING IS TO "CAPTURE" CONTROL SPECS MADE LOCALLY ON MECHANISMS IN THE COMP + # AND INSTANTIATE ControlSignals FOR THEM HERE, ALONG WITH THOSE SPECIFIED IN THE CONSTRUCTOR + # FOR THE OCM. ALSO CAPTURES DUPLICATES (SEE MOD BELOW). 
+ # FIX: WITHOUT THIS, GET THE mod param ERROR; WITH IT, GET FAILURES IN test_control: + # TestModelBasedOptimizationControlMechanisms_Execution + # test_evc + # test_stateful_mechanism_in_simulation + # TestControlMechanisms: + # test_lvoc + # test_lvoc_both_prediction_specs + # test_lvoc_features_function + # if self.agent_rep and self.agent_rep.componentCategory=='Composition': + # control_signals_from_composition = self.agent_rep._get_control_signals_for_composition() + # self.output_ports.extend(control_signals_from_composition) + # MODIFIED 11/21/21 END + control_signals = [] + for i, spec in list(enumerate(self.output_ports)): control_signal = self._instantiate_control_signal(spec, context=context) control_signal._variable_spec = (OWNER_VALUE, i) + # MODIFIED 11/20/21 NEW: + # FIX - SHOULD MOVE THIS TO WHERE IT IS CALLED IN ControlSignal._instantiate_control_signal + if self._check_for_duplicates(control_signal, control_signals, context): + continue + # MODIFIED 11/20/21 END self.output_ports[i] = control_signal - self.defaults.value = np.tile(control_signal.parameters.variable.default_value, (i + 1, 1)) + + self._create_randomization_control_signal( + context, + set_control_signal_index=False + ) + + self.defaults.value = np.tile( + control_signal.parameters.variable.default_value, + (len(self.output_ports), 1) + ) self.parameters.control_allocation._set(copy.deepcopy(self.defaults.value), context) def _instantiate_function(self, function, function_params=None, context=None): @@ -912,7 +1754,7 @@ def _instantiate_function(self, function, function_params=None, context=None): super()._instantiate_function(function, function_params, context) def _instantiate_attributes_after_function(self, context=None): - """Instantiate OptimizationControlMechanism's OptimizatonFunction attributes""" + """Instantiate OptimizationControlMechanism's OptimizationFunction attributes""" super()._instantiate_attributes_after_function(context=context) @@ -939,51 +1781,30 @@ def _instantiate_attributes_after_function(self, context=None): # Assign parameters to function (OptimizationFunction) that rely on OptimizationControlMechanism self.function.reset(**{ DEFAULT_VARIABLE: self.parameters.control_allocation._get(context), - OBJECTIVE_FUNCTION: self.evaluation_function, + OBJECTIVE_FUNCTION: self.evaluate_agent_rep, # SEARCH_FUNCTION: self.search_function, # SEARCH_TERMINATION_FUNCTION: self.search_termination_function, - SEARCH_SPACE: self.parameters.control_allocation_search_space._get(context) + SEARCH_SPACE: self.parameters.control_allocation_search_space._get(context), }) if isinstance(self.agent_rep, type): self.agent_rep = self.agent_rep() - from psyneulink.core.compositions.compositionfunctionapproximator import CompositionFunctionApproximator - if (isinstance(self.agent_rep, CompositionFunctionApproximator)): + if self.agent_rep_type == COMPOSITION_FUNCTION_APPROXIMATOR: self._initialize_composition_function_approximator(context) - def _update_input_ports(self, runtime_params=None, context=None): - """Update value for each InputPort in self.input_ports: - - Call execute method for all (MappingProjection) Projections in Port.path_afferents - Aggregate results (using InputPort execute method) - Update InputPort.value - """ - # "Outcome" - outcome_input_port = self.input_port - outcome_input_port._update(params=runtime_params, context=context) - port_values = [np.atleast_2d(outcome_input_port.parameters.value._get(context))] - # MODIFIED 5/8/20 OLD: - # FIX 5/8/20 [JDC]: THIS DOESN'T CALL SUPER, SO NOT 
IDEAL HOWEVER, REVISION BELOW CRASHES... NEEDS TO BE FIXED - for i in range(1, len(self.input_ports)): - port = self.input_ports[i] - port._update(params=runtime_params, context=context) - port_values.append(port.parameters.value._get(context)) - return convert_to_np_array(port_values) - # # MODIFIED 5/8/20 NEW: - # input_port_values = super()._update_input_ports(runtime_params, context) - # port_values.append(input_port_values) - # return np.array(port_values) - # MODIFIED 5/8/20 END - def _execute(self, variable=None, context=None, runtime_params=None): - """Find control_allocation that optimizes result of `agent_rep.evaluate` .""" + """Find control_allocation that optimizes result of agent_rep.evaluate(). + """ if self.is_initializing: return [defaultControlAllocation] # # FIX: THESE NEED TO BE FOR THE PREVIOUS TRIAL; ARE THEY FOR FUNCTION_APPROXIMATOR? - self.parameters.state_feature_values._set(_parse_state_feature_values_from_variable(variable), context) + # FIX: NEED TO MODIFY IF OUTCOME InputPorts ARE MOVED + self.parameters.state_feature_values._set(_parse_state_feature_values_from_variable( + self.num_outcome_input_ports, + variable), context) # Assign default control_allocation if it is not yet specified (presumably first trial) control_allocation = self.parameters.control_allocation._get(context) @@ -997,7 +1818,8 @@ def _execute(self, variable=None, context=None, runtime_params=None): # have an adapt method, we also don't need to call the net_outcome getter net_outcome = self.parameters.net_outcome._get(context) - self.agent_rep.adapt(_parse_state_feature_values_from_variable(variable), + # FIX: NEED TO MODIFY IF OUTCOME InputPorts ARE MOVED + self.agent_rep.adapt(_parse_state_feature_values_from_variable(self.num_outcome_input_ports, variable), control_allocation, net_outcome, context=context) @@ -1012,8 +1834,9 @@ def _execute(self, variable=None, context=None, runtime_params=None): # IMPLEMENTATION NOTE: skip ControlMechanism._execute since it is a stub method that returns input_values optimal_control_allocation, optimal_net_outcome, saved_samples, saved_values = \ super(ControlMechanism,self)._execute(variable=control_allocation, + num_estimates=self.parameters.num_estimates._get(context), context=context, - runtime_params=runtime_params, + runtime_params=runtime_params ) # clean up frozen values after execution @@ -1048,22 +1871,37 @@ def _tear_down_simulation(self, sim_context=None): if not self.agent_rep.parameters.retain_old_simulation_data._get(): self.agent_rep._delete_contexts(sim_context, check_simulation_storage=True) - def evaluation_function(self, control_allocation, context=None, return_results=False): - """Compute `net_outcome ` for current set of `state_feature_values - ` and a specified `control_allocation - `. + def evaluate_agent_rep(self, control_allocation, context=None, return_results=False): + """Call `evaluate ` method of `agent_rep ` Assigned as the `objective_function ` for the OptimizationControlMechanism's `function `. - Calls `agent_rep `\\'s `evalute` method. + Evaluates `agent_rep ` by calling its `evaluate ` + method, which executes its `agent_rep ` using the current + `state_feature_values ` as the input and the specified + **control_allocation**. 
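A minimal sketch of the role this method plays as the optimizer's objective_function, assuming a hypothetical `ocm`
(an OptimizationControlMechanism already constructed and assigned as a Composition's controller) and a hypothetical
list `candidate_allocations` of control_allocation arrays; the assigned OptimizationFunction performs the equivalent
loop internally ::

    import numpy as np

    # Average the net_outcome estimate(s) returned for each candidate allocation,
    # then keep the allocation with the largest mean net_outcome.
    mean_outcomes = [np.mean(ocm.evaluate_agent_rep(allocation))
                     for allocation in candidate_allocations]
    best_allocation = candidate_allocations[int(np.argmax(mean_outcomes))]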
+ + If the `agent_rep ` is a `Composition`, each execution is a call to + its `run ` method that uses the `num_trials_per_estimate + ` as its **num_trials** argument, and the same + `state_feature_values ` and **control_allocation** + but a different randomly chosen seed for the random number generator for each run. It then returns an array of + length **number_estimates** containing the `net_outcome ` of each execution + and, if **return_results** is True, also an array with the `results ` of each run. - Returns a scalar that is the predicted `net_outcome ` - for the current `state_feature_values ` - and specified `control_allocation `. + COMMENT: + FIX: THIS SHOULD BE REFACTORED TO BE HANDLED THE SAME AS A Composition AS agent_rep + COMMENT + If the `agent_rep ` is a CompositionFunctionApproximator, + then `num_estimates ` is passed to it to handle execution and + estimation as determined by its implementation, and returns a single estimated net_outcome. + + (See `evaluate ` for additional details.) """ - # agent_rep is a Composition (since runs_simuluations = True) + + # agent_rep is a Composition (since runs_simulations = True) if self.agent_rep.runs_simulations: # KDM 5/20/19: crudely using default here because it is a stateless parameter # and there is a bug in setting parameter values on init, see TODO note above @@ -1079,35 +1917,95 @@ def evaluation_function(self, control_allocation, context=None, return_results=F # We shouldn't get this far if execution mode is not Python assert self.parameters.comp_execution_mode._get(context) == "Python" exec_mode = pnlvm.ExecutionMode.Python - result = self.agent_rep.evaluate(self.parameters.state_feature_values._get(context), - control_allocation, - self.parameters.num_estimates._get(context), - base_context=context, - context=new_context, - execution_mode=exec_mode, - return_results=return_results) + ret_val = self.agent_rep.evaluate(self.parameters.state_feature_values._get(context), + control_allocation, + self.parameters.num_trials_per_estimate._get(context), + base_context=context, + context=new_context, + execution_mode=exec_mode, + return_results=return_results) context.composition = old_composition - if self.defaults.search_statefulness: self._tear_down_simulation(new_context) - # If results of the simulation shoudld be returned then, do so. Agent Rep Evaluate will - # return a tuple in this case where the first element is the outcome as usual and the - # results of composision run are the second element. + # FIX: THIS SHOULD BE REFACTORED TO BE HANDLED THE SAME AS A Composition AS agent_rep + # If results of the simulation should be returned then, do so. agent_rep's evaluate method will + # return a tuple in this case in which the first element is the outcome as usual and the second + # is the results of the composition run. 
if return_results: - return result[0], result[1] + return ret_val[0], ret_val[1] else: - return result + return ret_val + # FIX: 11/3/21 - ??REFACTOR CompositionFunctionApproximator TO NOT TAKE num_estimates + # (i.e., LET OptimzationFunction._grid_evaluate HANDLE IT) # agent_rep is a CompositionFunctionApproximator (since runs_simuluations = False) else: - result = self.agent_rep.evaluate(self.parameters.state_feature_values._get(context), - control_allocation, - self.parameters.num_estimates._get(context), - context=context - ) - - return result + return self.agent_rep.evaluate(self.parameters.state_feature_values._get(context), + control_allocation, + self.parameters.num_estimates._get(context), + self.parameters.num_trials_per_estimate._get(context), + context=context + ) + + def _create_randomization_control_signal( + self, + context, + set_control_signal_index=True + ): + if self.num_estimates: + # must be SampleSpec in allocation_samples arg + randomization_seed_mod_values = SampleSpec(start=1, stop=self.num_estimates, step=1) + + # FIX: 11/3/21 noise PARAM OF TransferMechanism IS MARKED AS SEED WHEN ASSIGNED A DISTRIBUTION FUNCTION, + # BUT IT HAS NO PARAMETER PORT BECAUSE THAT PRESUMABLY IS FOR THE INTEGRATOR FUNCTION, + # BUT THAT IS NOT FOUND BY model.all_dependent_parameters + # Get Components with variables to be randomized across estimates + # and construct ControlSignal to modify their seeds over estimates + if self.random_variables is ALL: + self.random_variables = self.agent_rep.random_variables + + randomization_control_signal = ControlSignal( + name=RANDOMIZATION_CONTROL_SIGNAL, + modulates=[ + param.parameters.seed.port + for param in self.random_variables + ], + allocation_samples=randomization_seed_mod_values + ) + randomization_control_signal_index = len(self.output_ports) + randomization_control_signal._variable_spec = ( + OWNER_VALUE, randomization_control_signal_index + ) + randomization_control_signal = self._instantiate_control_signal( + randomization_control_signal, context + ) + self.output_ports.append(randomization_control_signal) + + # Otherwise, assert that num_estimates and number of seeds generated by randomization_control_signal are equal + num_seeds = self.control_signals[RANDOMIZATION_CONTROL_SIGNAL].parameters.allocation_samples._get(context).num + assert self.num_estimates == num_seeds, \ + f"PROGRAM ERROR: The value of the 'num_estimates' Parameter of {self.name}" \ + f"({self.num_estimates}) is not equal to the number of estimates that will be generated by " \ + f"its {RANDOMIZATION_CONTROL_SIGNAL} ControlSignal ({num_seeds})." + + function_search_space = self.function.parameters.search_space._get(context) + if randomization_control_signal_index >= len(function_search_space): + # TODO: check here if search_space has an item for each + # control_signal? or is allowing it through for future + # checks the right way? + + # search_space must be a SampleIterator + function_search_space.append(SampleIterator(randomization_seed_mod_values)) + + # workaround for fact that self.function.reset call in + # _instantiate_attributes_after_function expects to use + # old/unset values when running _update_default_variable, + # which calls self.agent_rep.evaluate and is brittle. 
+ if set_control_signal_index: + self.function.parameters.randomization_dimension._set( + randomization_control_signal_index, context + ) def _get_evaluate_input_struct_type(self, ctx): # We construct input from optimization function input @@ -1158,6 +2056,11 @@ def _gen_llvm_net_outcome_function(self, *, ctx, tags=frozenset()): ctx.int32_ty(i)]) data_out = builder.gep(op_in, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(0)]) + if data_in.type != data_out.type: + warnings.warn("Shape mismatch: Allocation sample '{}' ({}) doesn't match input port input ({}).".format(i, self.parameters.control_allocation_search_space.get(), op.defaults.variable)) + assert len(data_out.type.pointee) == 1 + data_out = builder.gep(data_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + builder.store(builder.load(data_in), data_out) # Invoke cost function @@ -1173,7 +2076,7 @@ def _gen_llvm_net_outcome_function(self, *, ctx, tags=frozenset()): val = builder.fadd(val, cost) builder.store(val, total_cost) - # compute net outcome + # compute net_outcome objective = builder.load(objective_ptr) net_outcome = builder.fsub(objective, builder.load(total_cost)) builder.store(net_outcome, arg_out) @@ -1238,13 +2141,26 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, comp_params, base_comp_state, allocation_sample, arg_out, arg_in, base_comp_data = llvm_func.args + if "const_params" in debug_env: + comp_params = builder.alloca(comp_params.type.pointee, name="const_params_loc") + const_params = comp_params.type.pointee(self.agent_rep._get_param_initializer(None)) + builder.store(const_params, comp_params) + # Create a simulation copy of composition state comp_state = builder.alloca(base_comp_state.type.pointee, name="state_copy") - builder.store(builder.load(base_comp_state), comp_state) + if "const_state" in debug_env: + const_state = self.agent_rep._get_state_initializer(None) + builder.store(comp_state.type.pointee(const_state), comp_state) + else: + builder.store(builder.load(base_comp_state), comp_state) # Create a simulation copy of composition data comp_data = builder.alloca(base_comp_data.type.pointee, name="data_copy") - builder.store(builder.load(base_comp_data), comp_data) + if "const_data" in debug_env: + const_data = self.agent_rep._get_data_initializer(None) + builder.store(comp_data.type.pointee(const_data), comp_data) + else: + builder.store(builder.load(base_comp_data), comp_data) # Evaluate is called on composition controller assert self.composition.controller is self @@ -1282,12 +2198,22 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, # Construct input comp_input = builder.alloca(sim_f.args[3].type.pointee, name="sim_input") + input_initialized = [False] * len(comp_input.type.pointee) for src_idx, ip in enumerate(self.input_ports): if ip.shadow_inputs is None: continue + + # shadow inputs point to an input port of of a node. + # If that node takes direct input, it will have an associated + # (input_port, output_port) in the input_CIM. + # Take the former as an index to composition input variable. 
cim_in_port = self.agent_rep.input_CIM_ports[ip.shadow_inputs][0] dst_idx = self.agent_rep.input_CIM.input_ports.index(cim_in_port) + # Check that all inputs are unique + assert not input_initialized[dst_idx], "Double initialization of input {}".format(dst_idx) + input_initialized[dst_idx] = True + src = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(src_idx)]) # Destination is a struct of 2d arrays dst = builder.gep(comp_input, [ctx.int32_ty(0), @@ -1295,19 +2221,33 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, ctx.int32_ty(0)]) builder.store(builder.load(src), dst) + # Assert that we have populated all inputs + assert all(input_initialized), \ + "Not all inputs to the simulated composition are initialized: {}".format(input_initialized) + + if "const_input" in debug_env: + if not debug_env["const_input"]: + input_init = [[os.defaults.variable.tolist()] for os in self.agent_rep.input_CIM.input_ports] + print("Setting default input: ", input_init) + else: + input_init = ast.literal_eval(debug_env["const_input"]) + print("Setting user input in evaluate: ", input_init) + + builder.store(comp_input.type.pointee(input_init), comp_input) + # Determine simulation counts - num_estimates_ptr = pnlvm.helpers.get_param_ptr(builder, self, + num_trials_per_estimate_ptr = pnlvm.helpers.get_param_ptr(builder, self, controller_params, - "num_estimates") + "num_trials_per_estimate") - num_estimates = builder.load(num_estimates_ptr, "num_estimates") + num_trials_per_estimate = builder.load(num_trials_per_estimate_ptr, "num_trials_per_estimate") - # if num_estimates is 0, run 1 trial - param_is_zero = builder.icmp_unsigned("==", num_estimates, + # if num_trials_per_estimate is 0, run 1 trial + param_is_zero = builder.icmp_unsigned("==", num_trials_per_estimate, ctx.int32_ty(0)) num_sims = builder.select(param_is_zero, ctx.int32_ty(1), - num_estimates, "corrected_estimates") + num_trials_per_estimate, "corrected_estimates") num_runs = builder.alloca(ctx.int32_ty, name="num_runs") builder.store(num_sims, num_runs) @@ -1395,53 +2335,69 @@ def _gen_llvm_output_port_parse_variable(self, ctx, builder, params, context, va # else: # return np.array(np.array(self.variable[1:]).tolist()) - # FIX: THE FOLLOWING SHOULD BE MERGED WITH HANDLING OF PredictionMechanisms FOR ORIG MODEL-BASED APPROACH; - # FIX: SHOULD BE GENERALIZED AS SOMETHING LIKE update_feature_values - - @tc.typecheck - @handle_external_context() - def add_state_features(self, features, context=None): - """Add InputPorts and Projections to OptimizationControlMechanism for state_features used to - predict `net_outcome ` - - **state_features** argument can use any of the forms of specification allowed for InputPort(s) - """ + @property + def agent_rep_type(self): + from psyneulink.core.compositions.compositionfunctionapproximator import CompositionFunctionApproximator + if isinstance(self.agent_rep, CompositionFunctionApproximator): + return COMPOSITION_FUNCTION_APPROXIMATOR + elif self.agent_rep.componentCategory=='Composition': + return COMPOSITION + else: + return None - if features: - features = self._parse_state_feature_specs(features=features, - context=context) - self.add_ports(InputPort, features) + def _parse_state_feature_function(self, feature_function): + if isinstance(feature_function, Function): + return copy.deepcopy(feature_function) + else: + return feature_function @tc.typecheck - def _parse_state_feature_specs(self, input_ports, feature_function, context=None): + def _parse_state_feature_specs(self, 
state_features, feature_functions, context=None): """Parse entries of state_features into InputPort spec dictionaries Set INTERNAL_ONLY entry of params dict of InputPort spec dictionary to True (so that inputs to Composition are not required if the specified state is on an INPUT Mechanism) - Assign functions specified in **state_feature_function** to InputPorts for all state_features + Assign functions specified in **state_feature_functions** to InputPorts for all state_features Return list of InputPort specification dictionaries """ - parsed_features = [] + _state_input_ports = _parse_shadow_inputs(self, state_features) - if not isinstance(input_ports, list): - input_ports = [input_ports] + parsed_features = [] - for spec in input_ports: - spec = _parse_port_spec(owner=self, port_type=InputPort, port_spec=spec) # returns InputPort dict - spec[PARAMS][INTERNAL_ONLY] = True - spec[PARAMS][PROJECTIONS] = None - if feature_function: - if isinstance(feature_function, Function): - feat_fct = copy.deepcopy(feature_function) + for spec in _state_input_ports: + # MODIFIED 11/29/21 NEW: + # If optimization uses Composition, assume that shadowing a Mechanism means shadowing its primary InputPort + if isinstance(spec, Mechanism) and self.agent_rep_type == COMPOSITION: + # FIX: 11/29/21: MOVE THIS TO _parse_shadow_inputs + # (ADD ARG TO THAT FOR DOING SO, OR RESTRICTING TO INPUTPORTS IN GENERAL) + if len(spec.input_ports)!=1: + raise OptimizationControlMechanismError(f"A Mechanism ({spec.name}) is specified in the " + f"'{STATE_FEATURES}' arg for {self.name} that has " + f"more than one InputPort; a specific one or subset " + f"of them must be specified.") + spec = spec.input_port + parsed_spec = _parse_port_spec(owner=self, port_type=InputPort, port_spec=spec) # returns InputPort dict + parsed_spec[PARAMS].update({INTERNAL_ONLY:True, + PROJECTIONS:None}) + if feature_functions: + if isinstance(feature_functions, dict) and spec in feature_functions: + feat_fct = feature_functions.pop(spec) else: - feat_fct = feature_function - spec.update({FUNCTION: feat_fct}) - spec = [spec] # so that extend works below + feat_fct = feature_functions + parsed_spec.update({FUNCTION: self._parse_state_feature_function(feat_fct)}) + parsed_spec = [parsed_spec] # so that extend works below - parsed_features.extend(spec) + parsed_features.extend(parsed_spec) return parsed_features + @property + def num_state_input_ports(self): + try: + return len(self.state_input_ports) + except: + return 0 + @property def _model_spec_parameter_blacklist(self): # default_variable is hidden in constructor arguments, @@ -1465,3 +2421,19 @@ def _initialize_composition_function_approximator(self, context): self.agent_rep.initialize(features_array=np.array(self.defaults.variable[1:]), control_signals = self.control_signals, context=context) + + # FIX: THE FOLLOWING SHOULD BE MERGED WITH HANDLING OF PredictionMechanisms FOR ORIG MODEL-BASED APPROACH; + # FIX: SHOULD BE GENERALIZED AS SOMETHING LIKE update_feature_values + @tc.typecheck + @handle_external_context() + def add_state_features(self, features, context=None): + """Add InputPorts and Projections to OptimizationControlMechanism for state_features used to + predict `net_outcome ` + + **state_features** argument can use any of the forms of specification allowed for InputPort(s) + """ + + if features: + features = self._parse_state_feature_specs(features=features, + context=context) + self.add_ports(InputPort, features) diff --git 
a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index 97ec250b099..663b65a2a6d 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -46,8 +46,8 @@ `, generated by the `primary_learned_projection` and its associated `output_source`. All of the MappingProjection(s) modified by a LearningMechanism must project from one `ProcessingMechanism ` to another in the same `Composition`. The learning components of a Composition can be -displayed using the System's `show_graph ` method with its **show_learning** argument assigned -`True` or *ALL*. +displayed using the Composition's `show_graph`show_graph ` method with its +**show_learning** argument assigned `True` or *ALL*. .. _LearningMechanism_Note @@ -369,7 +369,8 @@ learning `) or using the `learning method ` of a Composition, all of the Components required for learning are created automatically. The types of Components that are generated depend on the type of learning specified and the configuration of the `Composition `, as -described below. All of the learning Components of a Composition can be displayed using its `show_graph` method with +described below. All of the learning Components of a Composition can be displayed using its `show_graph +``show_graph ` method with the **show_learning** argument assigned `True` or *ALL*. .. _LearningMechanism_Single_Layer_Learning: @@ -404,7 +405,6 @@ .. figure:: _static/LearningMechanism_Single_Layer_Learning_fig.svg :alt: Schematic of Mechanisms and Projections involved in learning for a single MappingProjection - :scale: 50 % Components generated by a call to a `learning method ` of a Composition (e.g., ``add_backpropagaption_learning_pathway(pathway=[X, Y]``), labeled by type of Component (in **bold**, @@ -459,7 +459,6 @@ .. 
figure:: _static/LearningMechanism_Multilayer_Learning_fig.svg :alt: Schematic of Mechanisms and Projections involved in learning for a sequence of MappingProjections - :scale: 50% Components generated by a call to a `learning method ` of a Composition for a sequence with three ProcessingMechanisms (and therefore two MappingProjections to be learned; e.g., @@ -528,20 +527,20 @@ """ -import numpy as np -import typecheck as tc import warnings - from enum import Enum +import numpy as np +import typecheck as tc + from psyneulink.core.components.component import parameter_keywords from psyneulink.core.components.functions.nonstateful.learningfunctions import BackPropagation -from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base +from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism -from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.components.ports.modulatorysignals.learningsignal import LearningSignal from psyneulink.core.components.ports.parameterport import ParameterPort +from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ ADDITIVE, AFTER, ASSERT, ENABLED, INPUT_PORTS, \ @@ -550,7 +549,8 @@ from psyneulink.core.globals.parameters import FunctionParameter, Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel -from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array, is_numeric, parameter_spec, convert_to_list +from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array, is_numeric, parameter_spec, \ + convert_to_list __all__ = [ 'ACTIVATION_INPUT', 'ACTIVATION_INPUT_INDEX', 'ACTIVATION_OUTPUT', 'ACTIVATION_OUTPUT_INDEX', diff --git a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py index 1959b535e1b..df26cf1ded9 100644 --- a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py @@ -44,7 +44,7 @@ that is computed based on the `costs ` of its ControlSignals. A ControlMechanism can be assigned only the `ControlSignal` class of `ModulatorySignal`, but can be also be assigned other generic `OutputPorts ` that appear after its ControlSignals in its `output_ports - ` attribute. + ` attribute. 
 `GatingMechanism` is a specialized subclass of ControlMechanism, that is used to modulate the `value
 ` of an `InputPort` or `OutputPort`, and that uses
diff --git a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py
index 526b6729659..eff8c4cbbad 100644
--- a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py
+++ b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py
@@ -25,31 +25,81 @@
 Overview
 --------

-A CompositionInterfaceMechanism stores inputs from outside the Composition so that those can be delivered to the
-Composition's `INPUT ` Mechanism(s).
+CompositionInterfaceMechanisms act as interfaces between a `Composition` and its inputs from and outputs to the
+environment, or the Components of another Composition within which it is `nested `.
+
+.. technical_note::
+
+   The CompositionInterfaceMechanism provides both a standard interface through which other Components can interact
+   with the environment and/or Compositions, and a means of preserving the modularity of Compositions for
+   `compilation `. By providing the standard Components for communication among `Mechanisms
+   ` (`InputPorts ` and `OutputPorts `), Mechanisms (and/or other Compositions) that
+   are `INPUT ` `Nodes ` of a Composition can receive inputs from the environment
+   in the same way that any other Node receives inputs, from `afferent Projections ` (in
+   this case, the `input_CIM ` of the Composition to which they belong); and, similarly,
+   Components that are `OUTPUT ` `Nodes ` of a Composition can either report their
+   outputs to the Composition or, if they are in a `nested Composition `, send their outputs to
+   Nodes in an enclosing Composition just like any others, using `efferent Projections `.
+   Similarly, for compilation, they provide a standard interface for providing inputs to a Composition and for
+   aggregating its outputs that, again, preserves a standard interface to other Components (which may not be compiled).

 .. _CompositionInterfaceMechanism_Creation:

-Creating an CompositionInterfaceMechanism
------------------------------------------
+Creation
+--------

-A CompositionInterfaceMechanism is created automatically when an `INPUT ` Mechanism is identified in a
-Composition. When created, the CompositionInterfaceMechanism's OutputPort is set directly by the Composition. This
-Mechanism should never be executed, and should never be created by a user.
+The following three CompositionInterfaceMechanisms are created and assigned automatically to a Composition when it is
+constructed (and should never be constructed manually): `input_CIM `, `parameter_CIM
+` and `output_CIM ` (see `Composition_CIMs` for additional details).

-.. _CompositionInterfaceMechanism_Structure
+.. _CompositionInterfaceMechanism_Structure:

 Structure
 ---------

-[TBD]
-
-.. _CompositionInterfaceMechanism_Execution
+A CompositionInterfaceMechanism has a set of `InputPort` / `OutputPort` pairs that its `function
+` -- the `Identity` `Function` -- uses to relay inputs received by the CompositionInterfaceMechanism
+to its outputs.
These are listed in its `port_map ` attribute, each entry
+of which has as its key the `Port` of the Component with which the CompositionInterfaceMechanism communicates
+outside the Composition (i.e., the Port from which an `input_CIM ` receives an `afferent Projection
+`, from which a `parameter_CIM ` receives a `modulatory projection
+`, or to which an `output_CIM ` sends an `efferent Projection
+`), and as its value a tuple containing the corresponding (`InputPort`,
+`OutputPort`) pair used to transmit the information to or from the CompositionInterfaceMechanism.
+CompositionInterfaceMechanisms can be seen graphically using the `show_cim ` option of the
+Composition's `show_graph ` method (see figure below).
+
+.. figure:: _static/CIM_figure.svg
+
+   **Examples of Projections to nested Compositions routed through CompositionInterfaceMechanisms.** *Panel A:*
+   Simple example showing a basic configuration. *Panel B:* More complex configuration, generated from the script
+   below, showing Projections automatically created from a Node of the outer Composition (*X*) to two `INPUT
+   ` `Nodes ` of a `nested Composition `, a `ControlProjection`
+   from a `ControlMechanism` in the outer Composition to a Node it modulates in the nested one, and a Projection
+   from a `PROBE ` Node (*B*) in the nested Composition to the `ControlMechanism` that monitors it. ::
+
+       A = ProcessingMechanism(name='A')
+       B = ProcessingMechanism(name='B')
+       C = ProcessingMechanism(name='C')
+       D = ProcessingMechanism(name='D')
+       E = ProcessingMechanism(name='E')
+       F = ProcessingMechanism(name='F')
+       nested_comp = Composition(pathways=[[A,B,C], [D,E,F]], name='NESTED COMPOSITION')
+       X = ProcessingMechanism(name='INPUT NODE')
+       Y = ProcessingMechanism(name='OUTPUT NODE')
+       ctl = ControlMechanism(name='CONTROL MECHANISM',
+                              monitor_for_control=B,
+                              control=("slope", E))
+       outer_comp = Composition(name='OUTER COMPOSITION', pathways=[X, nested_comp, Y, ctl])
+       outer_comp.show_graph(show_cim=NESTED, show_node_structure=True)
+
+.. _CompositionInterfaceMechanism_Execution:

 Execution
 ---------

-[TBD]
+A CompositionInterfaceMechanism is executed when the Composition to which it belongs is executed, and should never
_CompositionInterfaceMechanism_Class_Reference: @@ -59,10 +109,10 @@ """ import warnings -import typecheck as tc - from collections.abc import Iterable +import typecheck as tc + from psyneulink.core.components.functions.nonstateful.transferfunctions import Identity from psyneulink.core.components.mechanisms.mechanism import Mechanism from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base @@ -70,7 +120,8 @@ from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.context import ContextFlags, handle_external_context -from psyneulink.core.globals.keywords import COMPOSITION_INTERFACE_MECHANISM, INPUT_PORTS, OUTPUT_PORTS, PREFERENCE_SET_NAME +from psyneulink.core.globals.keywords import COMPOSITION_INTERFACE_MECHANISM, INPUT_PORTS, OUTPUT_PORTS, \ + PREFERENCE_SET_NAME from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel @@ -83,8 +134,8 @@ class CompositionInterfaceMechanism(ProcessingMechanism_Base): CompositionInterfaceMechanism( \ function=Identity()) - Subclass of `ProcessingMechanism ` that acts as interface between a Composition and its - inputs from and outputs to the environment or other Mechanisms (if it is a nested Composition). + Subclass of `ProcessingMechanism ` that acts as interface between a Composition and its inputs + from and outputs to the environment or other Components (if it is a `nested Composition `). See `Mechanism ` for arguments and additional attributes. @@ -94,6 +145,11 @@ class CompositionInterfaceMechanism(ProcessingMechanism_Base): function : InterfaceFunction : default Identity the function used to transform the variable before assigning it to the Mechanism's OutputPort(s) + port_map : dict[Port:(InputPort,OutputPort)] + entries are comprised of keys designating a Component outside the Composition with which it communicates, + and values tuples that designate the corresponding `InputPort` - `OutputPort` pairs used to transmit that + information into or out of the Composition (see `CompositionInterfaceMechanism_Structure`, and + `Composition_CIMs` under Composition for additional details). """ componentType = COMPOSITION_INTERFACE_MECHANISM @@ -175,3 +231,41 @@ def remove_ports(self, ports, context=None): if port not in self.output_ports: output_ports_marked_for_deletion.add(port) self.user_added_ports[OUTPUT_PORTS] = self.user_added_ports[OUTPUT_PORTS] - output_ports_marked_for_deletion + + def _get_destination_node_for_input_port(self, input_port, comp=None): + """Return Port, Node and Composition for destination of projection from input_CIM to (possibly nested) node""" + # CIM MAP ENTRIES: [RECEIVER PORT, [input_CIM InputPort, input_CIM OutputPort]] + # Get sender to input_port of CIM for corresponding output_port + comp = comp or self + port_map = input_port.owner.port_map + output_port = [port_map[k][1] for k in port_map if port_map[k][0] is input_port] + assert len(output_port)==1, f"PROGRAM ERROR: Expected only 1 output_port for {input_port.name} " \ + f"in port_map for {input_port.owner}; found {len(output_port)}." + assert len(output_port[0].efferents)==1, f"PROGRAM ERROR: Port ({output_port.name}) expected to have " \ + f"just one efferent; has {len(output_port.efferents)}." 
+ receiver = output_port[0].efferents[0].receiver + if not isinstance(receiver.owner, CompositionInterfaceMechanism): + return receiver, receiver.owner, comp + return self._get_destination_node_for_input_port(receiver, receiver.owner.composition) + + def _get_source_node_for_output_port(self, output_port, comp=None): + """Return Port, Node and Composition for source of projection to output_CIM from (possibly nested) node""" + # CIM MAP ENTRIES: [SENDER PORT, [output_CIM InputPort, output_CIM OutputPort]] + # Get sender to input_port of CIM for corresponding output_port + comp = comp or self + port_map = output_port.owner.port_map + input_port = [port_map[k][0] for k in port_map if port_map[k][1] is output_port] + assert len(input_port)==1, f"PROGRAM ERROR: Expected only 1 input_port for {output_port.name} " \ + f"in port_map for {output_port.owner}; found {len(input_port)}." + assert len(input_port[0].path_afferents)==1, f"PROGRAM ERROR: Port ({input_port.name}) expected to have " \ + f"just one path_afferent; has {len(input_port.path_afferents)}." + sender = input_port[0].path_afferents[0].sender + if not isinstance(sender.owner, CompositionInterfaceMechanism): + return sender, sender.owner, comp + return self._get_source_node_for_output_port(sender, sender.owner.composition) + + def _sender_is_probe(self, output_port): + """Return True if source of output_port is a PROBE Node of the Composition to which it belongs""" + from psyneulink.core.compositions.composition import NodeRole + port, node, comp = self._get_source_node_for_output_port(output_port, self.composition) + return NodeRole.PROBE in comp.get_roles_by_node(node) diff --git a/psyneulink/core/components/mechanisms/processing/integratormechanism.py b/psyneulink/core/components/mechanisms/processing/integratormechanism.py index b6bd6c107cf..9b479cc0b71 100644 --- a/psyneulink/core/components/mechanisms/processing/integratormechanism.py +++ b/psyneulink/core/components/mechanisms/processing/integratormechanism.py @@ -89,9 +89,8 @@ from psyneulink.core.components.functions.stateful.integratorfunctions import AdaptiveIntegrator from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base from psyneulink.core.components.mechanisms.mechanism import Mechanism -from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import \ - DEFAULT_VARIABLE, INTEGRATOR_MECHANISM, RESULTS, VARIABLE, PREFERENCE_SET_NAME + DEFAULT_VARIABLE, INTEGRATOR_MECHANISM, VARIABLE, PREFERENCE_SET_NAME from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set, REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index 11ca581612b..f25a190559d 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -364,15 +364,15 @@ """ import warnings -import typecheck as tc - -from collections.abc import Iterable from collections import namedtuple +from collections.abc import Iterable + +import typecheck as tc from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination from psyneulink.core.components.mechanisms.processing.processingmechanism import 
ProcessingMechanism_Base -from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.components.ports.inputport import InputPort, INPUT_PORT +from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.components.ports.port import _parse_port_spec from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ @@ -433,8 +433,10 @@ class ObjectiveMechanism(ProcessingMechanism_Base): output_ports : list[OutputPort, value, str or dict] or dict[] : default [OUTCOME] specifies the OutputPorts for the Mechanism; + COMMENT: role: LEARNING or CONTROL : default None specifies if the ObjectiveMechanism is being used for learning or control (see `role` for details). + COMMENT Attributes ---------- @@ -467,11 +469,6 @@ class ObjectiveMechanism(ProcessingMechanism_Base): items or a number equal to the number of items in the ObjectiveMechanism's variable (i.e., its number of input_ports) and returns a 1d array. - role : None, LEARNING or CONTROL - specifies whether the ObjectiveMechanism is used for learning in a `Composition` (in conjunction with a - `LearningMechanism`), or for control in a Composition (in conjunction with a `ControlMechanism - `). - output_port : OutputPort contains the `primary OutputPort ` of the ObjectiveMechanism; the default is its *OUTCOME* `OutputPort `, the value of which is equal to the @@ -493,6 +490,12 @@ class ObjectiveMechanism(ProcessingMechanism_Base): the value of the objective or "loss" function computed by the ObjectiveMechanism's `function ` + modulatory_mechanism : None or ModulatoryMechanism + `ModulatoryMechanism` to which ObjectiveMechanism has been assigned, and to which one or more of its + `output_ports ` project. If that is a `ControlMechanism`, then + the ObjectiveMechanism is assigned as its `objective_mechanism `; + if it is a `LearningMechanism`, it is in its `error_sources ` attribute. + """ componentType = OBJECTIVE_MECHANISM @@ -599,6 +602,7 @@ def __init__(self, # This is used to specify whether the ObjectiveMechanism is associated with a ControlMechanism that is # the controller for a Composition; it is set by the ControlMechanism when it creates the ObjectiveMechanism self.for_controller = False + self.modulatory_mechanism = None def _validate_params(self, request_set, target_set=None, context=None): """Validate **role**, **monitor**, amd **input_ports** arguments @@ -833,6 +837,8 @@ def monitored_output_ports_weights_and_exponents(self): def monitored_output_ports_weights_and_exponents(self, weights_and_exponents_tuples): self.monitor_weights_and_exponents = weights_and_exponents_tuples +# FIX: 11/3/21 -- MOVE THIS TO ControlMechanism, AND INTEGRATE WITH ControlMechanism.validate_monitored_port_spec() +# OR MOVE THAT METHOD TO HERE?? 
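A minimal sketch of the kinds of specifications that can be passed in an ObjectiveMechanism's **monitor** argument
(the specifications that `_parse_monitor_specs`, below, is used to parse), using hypothetical Mechanisms ::

    from psyneulink import ObjectiveMechanism, ProcessingMechanism

    A = ProcessingMechanism(name='A')
    B = ProcessingMechanism(name='B')
    # A Mechanism spec monitors its primary OutputPort; an OutputPort can also be given explicitly
    om = ObjectiveMechanism(monitor=[A, B.output_port])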
def _parse_monitor_specs(monitor_specs): spec_tuple = namedtuple('SpecTuple', 'index spec') parsed_specs = [] diff --git a/psyneulink/core/components/mechanisms/processing/processingmechanism.py b/psyneulink/core/components/mechanisms/processing/processingmechanism.py index 59ea09f4a80..6eb238cf6a3 100644 --- a/psyneulink/core/components/mechanisms/processing/processingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/processingmechanism.py @@ -204,6 +204,7 @@ def _validate_inputs(self, inputs=None): # Let mechanism itself do validation of the input pass + __all__ = [ 'DEFAULT_RATE', 'ProcessingMechanism', 'ProcessingMechanismError' ] diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 9cee72efdea..03bf08ed93a 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -331,6 +331,24 @@ list. In other words, for each InputPort specified, a new one is created that receives exactly the same inputs from the same `senders ` as the ones specified. + If an InputPort shadows another, its `shadow_inputs ` attribute identifies the InputPort + that it shadows. + + .. note:: + Only InputPorts belonging to Mechanisms in the *same Composition*, or ones that are `INPUT ` + `Nodes ` of a `nested ` can be specified for shadowing, unless the + `allow_probes ` attribute of the `Composition` is set to True. Note also that any + Node that shadows an `INPUT ` `Node ` of the Composition to which it + belongs is itself also assigned the role of `INPUT ` Node. + + .. hint:: + If an InputPort needs to be shadowed that belongs to a Mechanism in a `nested ` that is + not an `INPUT ` `Node ` of that Composition, this can be accomplished as + follows: 1) add a Mechanism to the nested Composition with an InputPort that shadows the one to be + shadowed; 2) specify `OUTPUT ` as a `required_role ` + for that Mechanism; 3) use that Mechanism as the `InputPort specification ` + for the shadowing InputPort. + .. _InputPort_Compatability_and_Constraints: InputPort `variable `: Compatibility and Constraints @@ -514,7 +532,8 @@ from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel -from psyneulink.core.globals.utilities import append_type_to_name, convert_to_np_array, is_numeric, iscompatible, kwCompatibilityLength +from psyneulink.core.globals.utilities import \ + append_type_to_name, convert_to_np_array, is_numeric, iscompatible, kwCompatibilityLength, convert_to_list __all__ = [ 'InputPort', 'InputPortError', 'port_type_keywords', 'SHADOW_INPUTS', @@ -658,6 +677,9 @@ class InputPort(Port_Base): ` is an `INPUT` `Node ` of that Composition; if `True`, external input is *not* required or allowed. + shadow_inputs : InputPort + identifies the InputPort of another `Mechanism` that is being shadowed by this InputPort. + name : str the name of the InputPort; if it is not specified in the **name** argument of the constructor, a default is assigned by the InputPortRegistry of the Mechanism to which the InputPort belongs. 
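A minimal sketch of the work-around described in the hint earlier in this section, using hypothetical names; the
shadow specification follows the form described above (an existing InputPort used as an InputPort spec), and the
`required_roles` argument of `add_node` is assumed here as the way to assign the required role ::

    from psyneulink import Composition, ProcessingMechanism, NodeRole

    A = ProcessingMechanism(name='A')
    B = ProcessingMechanism(name='B')   # not an INPUT Node of NESTED; its InputPort is to be shadowed
    nested = Composition(pathways=[A, B], name='NESTED')
    # 1) a Mechanism in the nested Composition whose InputPort shadows B's InputPort
    relay = ProcessingMechanism(name='RELAY', input_ports=[B.input_port])
    # 2) require that RELAY be an OUTPUT Node of the nested Composition
    nested.add_node(relay, required_roles=NodeRole.OUTPUT)
    # 3) elsewhere, shadow RELAY's InputPort instead of B's
    shadower = ProcessingMechanism(name='SHADOWER', input_ports=[relay.input_port])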
Note that some Mechanisms @@ -1211,11 +1233,11 @@ def _parse_self_port_type_spec(self, owner, input_port, context=None): sender_output_ports = [p.sender for p in input_port.path_afferents] port_spec = {NAME: SHADOW_INPUT_NAME + input_port.owner.name, - VARIABLE: np.zeros_like(input_port.variable), - PORT_TYPE: InputPort, - PROJECTIONS: sender_output_ports, - PARAMS: {SHADOW_INPUTS: input_port}, - OWNER: owner} + VARIABLE: np.zeros_like(input_port.variable), + PORT_TYPE: InputPort, + PROJECTIONS: sender_output_ports, + PARAMS: {SHADOW_INPUTS: input_port}, + OWNER: owner} return port_spec @staticmethod @@ -1390,8 +1412,9 @@ def _instantiate_input_ports(owner, input_ports=None, reference_value=None, cont return port_list def _parse_shadow_inputs(owner, input_ports): - """Parses any {SHADOW_INPUTS:[InputPort or Mechaism,...]} items in input_ports into InputPort specif. dict.""" + """Parses any {SHADOW_INPUTS:[InputPort or Mechanism,...]} items in input_ports into InputPort specif. dict.""" + input_ports = convert_to_list(input_ports) input_ports_to_shadow_specs=[] for spec_idx, spec in enumerate(input_ports): # If {SHADOW_INPUTS:[InputPort or Mechaism,...]} is found: diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index d0e50a95787..5bc8b48516a 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -400,9 +400,11 @@ import numpy as np import typecheck as tc +import warnings # FIX: EVCControlMechanism IS IMPORTED HERE TO DEAL WITH COST FUNCTIONS THAT ARE DEFINED IN EVCControlMechanism # SHOULD THEY BE LIMITED TO EVC?? +from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce from psyneulink.core.components.functions.function import is_function_type from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator @@ -1075,8 +1077,81 @@ def compute_costs(self, intensity, context=None): duration_cost = self.duration_cost_function(self.parameters.cost._get(context), context=context) self.parameters.duration_cost._set(duration_cost, context) - return max(0.0, - self.combine_costs_function([intensity_cost, - adjustment_cost, - duration_cost], - context=context)) + all_costs = [intensity_cost, adjustment_cost, duration_cost] + combined_cost = self.combine_costs_function(all_costs, context=context) + return max(0.0, combined_cost) + + def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, + extra_args=[], tags:frozenset): + if "costs" in tags: + assert len(extra_args) == 0 + return self._gen_llvm_costs(ctx=ctx, tags=tags) + + return super()._gen_llvm_function(ctx=ctx, extra_args=extra_args, tags=tags) + + def _gen_llvm_costs(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): + args = [ctx.get_param_struct_type(self).as_pointer(), + ctx.get_state_struct_type(self).as_pointer(), + ctx.get_input_struct_type(self).as_pointer()] + + assert "costs" in tags + builder = ctx.create_llvm_function(args, self, str(self) + "_costs", + tags=tags, + return_type=ctx.float_ty) + + params, state, arg_in = builder.function.args + + func_params = pnlvm.helpers.get_param_ptr(builder, self, params, + "function") + func_state = pnlvm.helpers.get_state_ptr(builder, self, state, + "function") + + # FIXME: This allows INTENSITY and NONE + assert self.cost_options & ~CostFunctions.INTENSITY == 0 + + cfunc = 
ctx.import_llvm_function(self.function.combine_costs_fct) + cfunc_in = builder.alloca(cfunc.args[2].type.pointee) + + # Set to 0 be default + builder.store(cfunc_in.type.pointee(None), cfunc_in) + + cost_funcs = 0 + if self.cost_options & CostFunctions.INTENSITY: + ifunc = ctx.import_llvm_function(self.function.intensity_cost_fct) + + ifunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, + func_params, + "intensity_cost_fct") + ifunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, + func_state, + "intensity_cost_fct") + # Port input is always struct { data input, modulations } + ifunc_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + # point output to the proper slot in comb func input + assert cost_funcs == 0, "Intensity should eb the first cost function!" + ifunc_out = builder.gep(cfunc_in, [ctx.int32_ty(0), ctx.int32_ty(cost_funcs)]) + if ifunc_out.type != ifunc.args[3].type: + warnings.warn("Shape mismatch: {} element of combination func input ({}) doesn't match INTENSITY cost output ({})".format(cost_funcs, self.function.combine_costs_fct.defaults.variable, self.function.intensity_cost_fct.defaults.value)) + assert self.cost_options == CostFunctions.INTENSITY + ifunc_out = cfunc_in + + builder.call(ifunc, [ifunc_params, ifunc_state, ifunc_in, ifunc_out]) + + cost_funcs += 1 + + + # Call combination function + cfunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, + func_params, + "combine_costs_fct") + cfunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, + func_state, + "combine_costs_fct") + cfunc_out = builder.alloca(cfunc.args[3].type.pointee) + builder.call(cfunc, [cfunc_params, cfunc_state, cfunc_in, cfunc_out]) + + + ret_val = pnlvm.helpers.load_extract_scalar_array_one(builder, cfunc_out) + builder.ret(ret_val) + + return builder.function diff --git a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py index ccee7128f2b..8cba981e069 100644 --- a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py @@ -268,6 +268,7 @@ def __init__(self, error_value): def __str__(self): return repr(self.error_value) + gating_signal_keywords = {GATE} # gating_signal_keywords.update(modulatory_signal_keywords) @@ -443,37 +444,37 @@ def __init__(self, **kwargs) def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec): - """Get connections specified in a ParameterPort specification tuple - - Tuple specification can be: - (Port name, Mechanism) - [TBI:] (Mechanism, Port name, weight, exponent, projection_specs) - - Returns params dict with CONNECTIONS entries if any of these was specified. + """Get connections specified in a ParameterPort specification tuple - """ - from psyneulink.core.components.projections.projection import _parse_connection_specs + Tuple specification can be: + (Port name, Mechanism) + [TBI:] (Mechanism, Port name, weight, exponent, projection_specs) - params_dict = {} - port_spec = port_specific_spec + Returns params dict with CONNECTIONS entries if any of these was specified. 
- if isinstance(port_specific_spec, dict): - return None, port_specific_spec - - elif isinstance(port_specific_spec, tuple): - port_spec = None - params_dict[PROJECTIONS] = _parse_connection_specs(connectee_port_type=self, - owner=owner, - connections=port_specific_spec) - elif port_specific_spec is not None: - raise GatingSignalError("PROGRAM ERROR: Expected tuple or dict for {}-specific params but, got: {}". - format(self.__class__.__name__, port_specific_spec)) - - if params_dict[PROJECTIONS] is None: - raise GatingSignalError("PROGRAM ERROR: No entry found in {} params dict for {} " - "with specification of {}, {} or GatingProjection(s) to it". - format(GATING_SIGNAL, INPUT_PORT, OUTPUT_PORT, owner.name)) - return port_spec, params_dict + """ + from psyneulink.core.components.projections.projection import _parse_connection_specs + + params_dict = {} + port_spec = port_specific_spec + + if isinstance(port_specific_spec, dict): + return None, port_specific_spec + + elif isinstance(port_specific_spec, tuple): + port_spec = None + params_dict[PROJECTIONS] = _parse_connection_specs(connectee_port_type=self, + owner=owner, + connections=port_specific_spec) + elif port_specific_spec is not None: + raise GatingSignalError("PROGRAM ERROR: Expected tuple or dict for {}-specific params but, got: {}". + format(self.__class__.__name__, port_specific_spec)) + + if params_dict[PROJECTIONS] is None: + raise GatingSignalError("PROGRAM ERROR: No entry found in {} params dict for {} " + "with specification of {}, {} or GatingProjection(s) to it". + format(GATING_SIGNAL, INPUT_PORT, OUTPUT_PORT, owner.name)) + return port_spec, params_dict def _instantiate_cost_functions(self, context): """Override ControlSignal as GatingSignal has not cost functions""" diff --git a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py index bf8775052b8..697ee0ef062 100644 --- a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py @@ -203,7 +203,6 @@ .. figure:: _static/Modulation_Anatomy_fig.svg :alt: Modulation - :scale: 150 % **Three types of Modulatory Components and the Ports they modulate**. The default `type of modulation ` for each type of ModulatorySignal, and the default Function and modulated parameter of @@ -285,7 +284,6 @@ .. figure:: _static/Modulation_Detail_fig.svg :alt: Modulation_Detail - :scale: 150 % A ModulatorySignal modulates the `value ` of a Port either by modifying a parameter of the Port's `function `, or assigining the `value ` of the Port directly. 
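A minimal sketch of the two cases just described, using a hypothetical ProcessingMechanism whose default `Linear`
function has `slope` and `intercept` parameters; the tuple form of the control specification follows the usage shown
in the CompositionInterfaceMechanism example earlier in this patch, and `modulation=OVERRIDE` is assumed here as the
way to select direct assignment of the modulated Port's value ::

    from psyneulink import ControlMechanism, ControlSignal, ProcessingMechanism, OVERRIDE

    T = ProcessingMechanism(name='T')
    # Modulate the value of T's 'slope' ParameterPort by modifying a parameter of its function (the default)
    C1 = ControlMechanism(name='C1', control=('slope', T))
    # Assign the ControlSignal's value directly as the value of T's 'intercept' ParameterPort
    C2 = ControlMechanism(name='C2',
                          control=ControlSignal(modulates=('intercept', T), modulation=OVERRIDE))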
@@ -410,11 +408,10 @@ from psyneulink.core.components.component import component_keywords from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.context import ContextFlags +from psyneulink.core.globals.defaults import defaultModulatoryAllocation from psyneulink.core.globals.keywords import \ ADDITIVE_PARAM, DISABLE, MAYBE, MECHANISM, MODULATION, MODULATORY_SIGNAL, MULTIPLICATIVE_PARAM, \ OVERRIDE, PROJECTIONS, VARIABLE - -from psyneulink.core.globals.defaults import defaultModulatoryAllocation from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel __all__ = [ @@ -435,6 +432,7 @@ def _is_modulatory_spec(spec, include_matrix_spec=True): else: return False + modulatory_signal_keywords = {MECHANISM, MODULATION} modulatory_signal_keywords.update(component_keywords) modulation_type_keywords = [MULTIPLICATIVE_PARAM, ADDITIVE_PARAM, OVERRIDE, DISABLE] diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index bc6148c900d..ef2213aafc2 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -620,10 +620,8 @@ import types import warnings -from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import Component, ComponentError from psyneulink.core.components.functions.function import Function -from psyneulink.core.components.functions.nonstateful.transferfunctions import CostFunctions from psyneulink.core.components.ports.port import Port_Base, _instantiate_port_list, port_type_keywords from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ @@ -1291,55 +1289,6 @@ def _dict_summary(self): } } - def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, - extra_args=[], tags:frozenset): - if "costs" in tags: - assert len(extra_args) == 0 - return self._gen_llvm_costs(ctx=ctx, tags=tags) - - return super()._gen_llvm_function(ctx=ctx, extra_args=extra_args, tags=tags) - - def _gen_llvm_costs(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): - args = [ctx.get_param_struct_type(self).as_pointer(), - ctx.get_state_struct_type(self).as_pointer(), - ctx.get_input_struct_type(self).as_pointer()] - - assert "costs" in tags - builder = ctx.create_llvm_function(args, self, str(self) + "_costs", - tags=tags, - return_type=ctx.float_ty) - - params, state, arg_in = builder.function.args - - # FIXME: Add support for other cost types - assert self.cost_options == CostFunctions.INTENSITY - - ifunc = ctx.import_llvm_function(self.function.intensity_cost_fct) - - func_params = pnlvm.helpers.get_param_ptr(builder, self, params, - "function") - func_state = pnlvm.helpers.get_state_ptr(builder, self, state, - "function") - ifunc_params = pnlvm.helpers.get_param_ptr(builder, self.function, - func_params, - "intensity_cost_fct") - ifunc_state = pnlvm.helpers.get_state_ptr(builder, self.function, - func_state, - "intensity_cost_fct") - ifunc_out = builder.alloca(ifunc.args[3].type.pointee) - # Port input is always struct - ifunc_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - - builder.call(ifunc, [ifunc_params, ifunc_state, ifunc_in, ifunc_out]) - - - # Cost function output is 1 element array - ret_ptr = builder.gep(ifunc_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) - ret_val = builder.load(ret_ptr) - builder.ret(ret_val) - - return builder.function - def _instantiate_output_ports(owner, output_ports=None, context=None): """Call 
Port._instantiate_port_list() to instantiate ContentAddressableList of OutputPort(s) diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index 659aee2562f..69431683dba 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -373,7 +373,7 @@ import typecheck as tc from psyneulink.core.components.component import Component, parameter_keywords -from psyneulink.core.components.functions.function import get_param_value_for_keyword +from psyneulink.core.components.functions.function import FunctionError, get_param_value_for_keyword from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import ModulatorySignal from psyneulink.core.components.ports.port import PortError, Port_Base, _instantiate_port, port_type_keywords from psyneulink.core.components.shellclasses import Mechanism, Projection, Function @@ -571,8 +571,16 @@ def _get_possible_port_names(self, param_name): return names @classmethod - def _get_explicit_name(cls, port_name, parameter_name=None): - return f'{port_name}{cls.separator}{parameter_name}' + def _get_explicit_name(cls, port_name, parameter_names=None): + if isinstance(parameter_names, str): + parameter_names = [parameter_names] + + if parameter_names is not None: + suffix = cls.separator + cls.separator.join(p for p in parameter_names) + else: + suffix = '' + + return f'{port_name}{suffix}' @classmethod def _get_base_name(cls, explicit_name): @@ -584,7 +592,7 @@ def _get_base_name(cls, explicit_name): @classmethod def _get_suffix(cls, explicit_name): try: - return explicit_name.split(cls.separator)[1] + return cls.separator.join(explicit_name.split(cls.separator)[1:]) except IndexError: return '' @@ -1005,45 +1013,74 @@ def skip_parameter_port(parameter): or not parameter.modulable ) - port_parameters = collections.defaultdict(set) + def _enumerate_parameter_ports(obj, prev_objs, port_collection): + """ + Returns: + [Dict[str:List[List]]]: a dictionary containing keys that + correspond to parameter names that will get parameter ports, + and values of lists of lists. 
Each list contains a series of + parameter names that correspond to a path to the function + whose parameter (with name equivalent to the key) will be + modulated + """ + for p in obj.parameters: + # prefer instantiated value if exists + try: + func = p._get(context) + except FunctionError: + func = None + + if func is None: + func = p.default_value + + if ( + not p.reference + and is_instance_or_subclass(func, Function) + and not isinstance(p, (ParameterAlias, SharedParameter)) + ): + _enumerate_parameter_ports( + func, prev_objs + [p.name], port_collection + ) + + if isinstance(p, ParameterAlias): + port_aliases.add(p.name) + + if not skip_parameter_port(p): + port_collection[p.name].append(prev_objs) + + return port_collection + + def _get_corresponding_component(obj, param_list, context=None, index=None): + if index is None: + index = len(param_list) - 1 + + res = obj + for p in param_list[0:index + 1]: + param = getattr(res.parameters, p) + func = param._get(context) + + if func is None: + func = param.default_value + + res = func + + return res + port_aliases = set() - owner_ports = set() - - # function may be a custom function not yet parsed to a UDF - # function may also be a Function class, in which case parameter - # ports are still created for the modulable Parameters - - for p in owner.parameters: - func = p.default_value - if ( - not p.reference - and is_instance_or_subclass(func, Function) - and not isinstance(p, (ParameterAlias, SharedParameter)) - ): - for func_param in func.parameters: - if not skip_parameter_port(func_param): - port_parameters[func_param.name].add(p.name) - if isinstance(p, ParameterAlias): - port_aliases.add(p.name) - - if not skip_parameter_port(p): - owner_ports.add(p.name) - - for parameter_port_name in port_parameters: - if ( - len(port_parameters[parameter_port_name]) > 1 + port_collection = _enumerate_parameter_ports( + owner, [], collections.defaultdict(list) + ) + + for parameter_port_name in port_collection: + add_suffix = ( + len(port_collection[parameter_port_name]) > 1 or parameter_port_name in port_aliases - or parameter_port_name in owner_ports - ): - add_suffix = True - else: - add_suffix = False + ) - for corresponding_parameter_component_name in port_parameters[parameter_port_name]: - corresponding_parameter_component = getattr( - owner.parameters, - corresponding_parameter_component_name - )._get(context) + for corresponding_parameter_component_names in port_collection[parameter_port_name]: + corresponding_parameter_component = _get_corresponding_component( + owner, corresponding_parameter_component_names, context + ) p = getattr( corresponding_parameter_component.parameters, @@ -1052,15 +1089,26 @@ def skip_parameter_port(parameter): # .function is not finalized yet, because this happens before # _instantiate_function - if corresponding_parameter_component_name is FUNCTION: - source = operator.attrgetter(f'{FUNCTION}.parameters.{p.name}') - else: + if corresponding_parameter_component is owner and p.name != FUNCTION: source = p + else: + source = operator.attrgetter( + f'{".".join(corresponding_parameter_component_names)}.parameters.{p.name}' + ) # use Shared/FunctionParameter value as fallback try: - value = owner.initial_shared_parameters[corresponding_parameter_component_name][p.name] - except (KeyError, TypeError): + # want the isp value for the last parameter name of the + # second to last component + isp_owner = _get_corresponding_component( + owner, corresponding_parameter_component_names, context, -2 + ) + except IndexError: + 
isp_owner = owner + + try: + value = isp_owner.initial_shared_parameters[corresponding_parameter_component_names[-1]][p.name] + except (AttributeError, IndexError, KeyError, TypeError): value = None # if parameter value on actual Parameter was specified or there is @@ -1074,7 +1122,9 @@ def skip_parameter_port(parameter): if add_suffix: explicit_name = ParameterPortList._get_explicit_name( p.name, - corresponding_parameter_component_name + corresponding_parameter_component_names + if len(corresponding_parameter_component_names) > 0 + else ParameterPortList._owner_port_suffix ) else: explicit_name = p.name @@ -1089,34 +1139,6 @@ def skip_parameter_port(parameter): explicit_name=explicit_name ) - for p in owner.parameters: - if not skip_parameter_port(p): - if ( - p.name in port_parameters - or p.name in port_aliases - ): - explicit_name = ParameterPortList._get_explicit_name( - p.name, - ParameterPortList._owner_port_suffix - ) - else: - explicit_name = p.name - - if p.spec is not None: - value = p.spec - else: - value = p.default_value - - _instantiate_parameter_port( - owner, - p.name, - value, - context=context, - function=function, - source=p, - explicit_name=explicit_name, - ) - owner.parameter_ports.sort(key=lambda port: port.name) def _instantiate_parameter_port( @@ -1195,7 +1217,7 @@ def _get_tuple_for_single_item_modulatory_spec(obj, name, value): elif _is_modulatory_spec(param_value, include_matrix_spec=False) and not isinstance(param_value, tuple): # If parameter is a single Modulatory specification (e.g., ControlSignal, or CONTROL, etc.) - # try to place it in a tuple (for interpretation by _parse_port_spec) using default value as 1st item + # try to place it in a tuple (for interpretation by _parse_port_spec) using default value as 1st item # (note: exclude matrix since it is allowed as a value specification but not a projection reference) try: param_value = _get_tuple_for_single_item_modulatory_spec(function, param_name, param_value) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 3ecd5551004..811bcb49c77 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -1766,7 +1766,12 @@ def _get_receiver_port(spec): context=context) # Match the projection's value with the value of the function parameter # should be defaults.value? 
- mod_proj_spec_value = type_match(projection.value, type(mod_param_value)) + try: + mod_proj_spec_value = type_match(projection.value, type(mod_param_value)) + except TypeError as error: + raise PortError(f"The value for {self.name} of {self.owner.name} ({projection.value}) does " + f"not match the format ({mod_param_value}) of the Parameter it modulates " + f"({receiver.owner.name}[{mod_param_name}]).") if (mod_param_value is not None and not iscompatible(mod_param_value, mod_proj_spec_value)): raise PortError(f"Output of {projection.name} ({mod_proj_spec_value}) is not compatible " @@ -1801,9 +1806,12 @@ def _remove_projection_to_port(self, projection, context=None): else: shape = list(self.defaults.variable.shape) # Reduce outer dimension by one - shape[0]-=1 - self.defaults.variable = np.resize(self.defaults.variable, shape) - self.function.defaults.variable = np.resize(self.function.defaults.variable, shape) + # only if shape is already greater than 1 (ports keep + # default of [0] if no incoming projections) + shape[0] -= 1 + if shape[0] > 0: + self.defaults.variable = np.resize(self.defaults.variable, shape) + self.function.defaults.variable = np.resize(self.function.defaults.variable, shape) del self.path_afferents[self.path_afferents.index(projection)] def _get_primary_port(self, mechanism): @@ -3260,8 +3268,10 @@ def _parse_port_spec(port_type=None, # port_dict[OWNER].name, spec_function_value, spec_function)) if port_dict[REFERENCE_VALUE] is not None and not iscompatible(port_dict[VALUE], port_dict[REFERENCE_VALUE]): - raise PortError("Port value ({}) does not match reference_value ({}) for {} of {})". - format(port_dict[VALUE], port_dict[REFERENCE_VALUE], port_type.__name__, owner.name)) + port_name = f"the {port_dict[NAME]}" if (NAME in port_dict and port_dict[NAME]) else f"an" + raise PortError(f"The value ({port_dict[VALUE]}) for {port_name} {port_type.__name__} of " + f"{owner.name} does not match the reference_value ({port_dict[REFERENCE_VALUE]}) " + f"used for it at construction.") return port_dict diff --git a/psyneulink/core/components/projections/modulatory/controlprojection.py b/psyneulink/core/components/projections/modulatory/controlprojection.py index 2352781f271..624eb563a0d 100644 --- a/psyneulink/core/components/projections/modulatory/controlprojection.py +++ b/psyneulink/core/components/projections/modulatory/controlprojection.py @@ -30,8 +30,8 @@ ` of a `ProcessingMechanism `. It takes the `value ` of a `ControlSignal` of a `ControlMechanism ` and uses it to modify the value of the parameter associated with the ParameterPort to which it projects. All of the ControlProjections in a Composition, along with its other -`control components `, can be displayed using the Composition's `show_graph ` -method with its **show_control** argument assigned as `True`. +`control components `, can be displayed using the Composition's `show_graph +` method with its **show_control** argument assigned as `True`. .. 
_ControlProjection_Creation: @@ -120,7 +120,7 @@ from psyneulink.core.globals.context import ContextFlags from psyneulink.core.globals.keywords import \ CONTROL, CONTROL_PROJECTION, CONTROL_SIGNAL, INPUT_PORT, OUTPUT_PORT, PARAMETER_PORT -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.parameters import Parameter, SharedParameter from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -141,15 +141,6 @@ def __str__(self): return repr(self.error_value) -def _control_signal_getter(owning_component=None, context=None): - return owning_component.sender.parameters.value._get(context) - - -def _control_signal_setter(value, owning_component=None, context=None): - owning_component.sender.parameters.value._set(value, context, override) - return value - - class ControlProjection(ModulatoryProjection_Base): """ ControlProjection( \ @@ -232,7 +223,7 @@ class Parameters(ModulatoryProjection_Base.Parameters): :type: `Function` """ function = Parameter(Linear, stateful=False, loggable=False) - control_signal = Parameter(None, read_only=True, getter=_control_signal_getter, setter=_control_signal_setter, pnl_internal=True) + control_signal = SharedParameter(None, attribute_name='sender', shared_parameter_name='value') control_signal_params = Parameter( None, diff --git a/psyneulink/core/components/projections/modulatory/learningprojection.py b/psyneulink/core/components/projections/modulatory/learningprojection.py index 80efba5c35c..4b1a4a8bb63 100644 --- a/psyneulink/core/components/projections/modulatory/learningprojection.py +++ b/psyneulink/core/components/projections/modulatory/learningprojection.py @@ -33,8 +33,8 @@ to the *MATRIX* `ParameterPort` of a `MappingProjection`. It takes the `value ` of a `LearningSignal` of a `LearningMechanism`, and uses it to modify the value of the `matrix ` parameter of that MappingProjection. All of the LearningProjections in a System, along with its other `learning -components `, can be displayed using the System's `show_graph -` method with its **show_learning** argument assigned as `True`. +components `, can be displayed using the Composition's `show_graph +` method with its **show_learning** argument assigned as `True`. .. 
_LearningProjection_Creation: diff --git a/psyneulink/core/components/projections/modulatory/modulatoryprojection.py b/psyneulink/core/components/projections/modulatory/modulatoryprojection.py index 036eb43ce1b..d0e62b4c794 100644 --- a/psyneulink/core/components/projections/modulatory/modulatoryprojection.py +++ b/psyneulink/core/components/projections/modulatory/modulatoryprojection.py @@ -94,10 +94,9 @@ """ -from psyneulink.core.components.projections.projection import Projection_Base, ProjectionRegistry +from psyneulink.core.components.projections.projection import Projection_Base from psyneulink.core.globals.keywords import MODULATORY_PROJECTION, NAME -from psyneulink.core.globals.log import ContextFlags, LogEntry -from psyneulink.core.globals.registry import remove_instance_from_registry +from psyneulink.core.globals.log import ContextFlags __all__ = [ diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index ac8833cdfd6..40fe7d7d67c 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -1086,7 +1086,7 @@ def _is_projection_spec(spec, proj_type:tc.optional(type)=None, include_matrix_s if isinstance(spec, Projection): if proj_type is None or isinstance(spec, proj_type): - return True + return True else: return False if isinstance(spec, Port): @@ -1142,7 +1142,6 @@ def _is_projection_spec(spec, proj_type:tc.optional(type)=None, include_matrix_s return False - def _is_projection_subclass(spec, keyword): """Evaluate whether spec is a valid specification of type @@ -1178,7 +1177,6 @@ def _is_projection_subclass(spec, keyword): return True return False - def _parse_projection_spec(projection_spec, owner = None, # Used only for error message port_type = None, # Used only for default assignment @@ -1271,7 +1269,6 @@ def _parse_projection_spec(projection_spec, proj_spec_dict[PROJECTION_TYPE])) return proj_spec_dict - def _parse_projection_keyword(projection_spec:str): """Takes keyword (str) and returns corresponding Projection class """ @@ -1588,7 +1585,7 @@ def _parse_connection_specs(connectee_port_type, # If specification is a list of Ports and/or Mechanisms, get Projection spec for each if isinstance(first_item, list): - # Call _parse_connection_spec for each Port or Mechanism, to generate a conection spec for each + # Call _parse_connection_spec for each Port or Mechanism, to generate a connection spec for each for connect_with_spec in first_item: if not isinstance(connect_with_spec, (Port, Mechanism)): raise PortError(f"Item in the list used to specify a {last_item.__name__} " @@ -1617,7 +1614,7 @@ def _parse_connection_specs(connectee_port_type, "Mechanism".format(connectee_port_type.__name__, owner.name, mech_item)) # First item of tuple is a list of Port names, so recursively process it if isinstance(port_item, list): - # Call _parse_connection_spec for each Port name, to generate a conection spec for each + # Call _parse_connection_spec for each Port name, to generate a connection spec for each for port_Name in port_item: if not isinstance(port_Name, str): raise ProjectionError("Expected 1st item of the {} specification tuple for {} ({}) to be " diff --git a/psyneulink/core/compositions/__init__.py b/psyneulink/core/compositions/__init__.py index 80a03ad17c9..d40cc9f88e8 100644 --- a/psyneulink/core/compositions/__init__.py +++ b/psyneulink/core/compositions/__init__.py @@ -1,16 +1,19 @@ from . import composition from . 
import compositionfunctionapproximator +from . import parameterestimationcomposition from . import showgraph from .composition import * from .pathway import * from .compositionfunctionapproximator import * +from .parameterestimationcomposition import * from .showgraph import * from .report import * __all__ = list(composition.__all__) __all__.extend(pathway.__all__) __all__.extend(compositionfunctionapproximator.__all__) +__all__.extend(parameterestimationcomposition.__all__) __all__.extend(showgraph.__all__) __all__.extend(report.__all__) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 74d588a3d6d..2e4bed59130 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -24,6 +24,8 @@ - `Composition_Graph` - `Composition_Nodes` - `Composition_Nested` + • `Probes ` + - `Composition_CIMs` - `Composition_Projections` - `Composition_Pathways` * `Composition_Controller` @@ -39,17 +41,22 @@ - `Composition_Learning_AutodiffComposition` - `Composition_Learning_UDF` * `Composition_Execution` - - `Reporting ` + - `Execution Methods ` - `Composition_Execution_Inputs` - • `Composition_Input_Dictionary` - • `Composition_Programmatic_Inputs` - - `Composition_Runtime_Params` - - `Composition_Cycles_and_Feedback` - • `Composition_Cycle` - • `Composition_Feedback` - - `Composition_Execution_Context` - - `Composition_Reset` - - `Composition_Compilation` + • `Composition_Input_Formats` + - `Composition_Input_Dictionary` + - `Composition_Programmatic_Inputs` + - `Composition_Execution_Factors` + • `Composition_Runtime_Params` + • `Composition_Cycles_and_Feedback` + - `Composition_Cycle` + - `Composition_Feedback` + • `Composition_Execution_Context` + • `Composition_Timing` + • `Composition_Reset` + • `Composition_Compilation` + - `Results, Reporting and Logging ` + * `Composition_Visualization` * `Composition_Examples` - `Composition_Examples_Creation` - `Composition_Examples_Run` @@ -67,9 +74,9 @@ Overview -------- -.. warning:: - As of PsyNeuLink 0.7.5, the API for using Compositions for Learning has been slightly changed! - Please see `this link ` for more details. + .. warning:: + As of PsyNeuLink 0.7.5, the API for using Compositions for Learning has been slightly changed! + Please see `this link ` for more details. Composition is the base class for objects that combine PsyNeuLink `Components ` into an executable model. It defines a common set of attributes possessed, and methods used by all Composition objects. @@ -173,7 +180,8 @@ - `add_linear_processing_pathway ` adds and a list of `Nodes ` and `Projections ` to the Composition, - inserting a default Projection between any adjacent pair of Nodes for which one is not otherwise specified; + inserting a default Projection between any adjacent pair of Nodes for which one is not otherwise specified + (or possibly a set of Projections if either Node is a Composition -- see method documentation for details); returns the `Pathway` added to the Composition. COMMENT: @@ -252,15 +260,15 @@ its Nodes and the dependencies determined by its Projections. There are no restrictions on the structure of the graph, which can be `acyclic or cyclic `, and/or hierarchical (i.e., contain one or more `nested Compositions `) as described below. A Composition's `graph ` can be -displayed using the Compositon's `show_graph ` method (see `ShowGraph_show_graph_Method`). +displayed using the Composition's `show_graph ` method (see `ShowGraph_show_graph_Method`). .. 
_Composition_Acyclic_Cyclic: -Acyclic and Cyclic Graphs -^^^^^^^^^^^^^^^^^^^^^^^^^ +**Acyclic and Cyclic Graphs** +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Projections are always directed (that is, information is transimtted in only one direction). Therefore, if the -Projections among the Nodes of the Compostion never form a loop, then it is a `directed acyclic graph (DAG) +Projections among the Nodes of the Composition never form a loop, then it is a `directed acyclic graph (DAG) `_, and the order in which its Nodes are executed can be determined by the structure of the graph itself. However if the Composition contains loops, then its structure is a `cyclic graph `_, and how the Nodes in the loop are initialized and the order in which @@ -272,6 +280,7 @@ XXX ADD FIGURE WITH DAG (FF) AND CYCLIC (RECURRENT) GRAPHS, OR POINT TO ONE BELOW COMMENT + .. _Composition_Nodes: *Nodes* @@ -281,20 +290,19 @@ `. The Nodes of a Composition's graph are listed in its `nodes ` attribute. Each Node is assigned one or more `NodeRoles ` that designate its status in the graph. Nodes are assigned one or more `NodeRoles ` automatically when a Composition is constructed, and when Nodes or `Pathways -` are added to it. However, some of these can be explicitly assigned by specifying the desired -`NodeRole` in any of the following places: +` are added to it or new `Projections ` are assigned to it. However, some of these +can be explicitly assigned by specifying the desired `NodeRole` in any of the following places: .. _Composition_Node_Role_Assignment: * the **required_roles** argument of the Composition's `add_node ` or `add_nodes ` methods; - * a tuple specifying the Node in the **pathways** argument of the Compositon's constructor, a `Pathway`\\'s + * a tuple specifying the Node in the **pathways** argument of the Composition's constructor, a `Pathway`\\'s constructor, or in one of the methods used to add a `Pathway ` to the Composition (see `Composition_Creation`); the Node must be the first item of the tuple, and the `NodeRole` its 2nd item. - * the **roles** argument of the `require_node_roles ` called for an - an existing `Node `. + * the **roles** argument of the `require_node_roles ` called for an existing Node. For example, by default, the `ORIGIN` Nodes of a Composition are assigned as its `INPUT` nodes (that is, ones that receive the `external input ` when it is `run `), and similarly its @@ -306,28 +314,137 @@ to a particular Node can be listed using the `get_roles_by_node ` method, and all of the nodes assigned a particular role can be listed using the `get_nodes_by_role ` method. + .. _Composition_Nested: *Nested Compositions* ~~~~~~~~~~~~~~~~~~~~~ A nested Composition is one that is a `Node ` within another Composition. When the outer -Composition is `executed `, the nested Composition is executed when its Node in the outer is -called to execute by the outer Composition's `scheduler `. Any Node within the outer -Composition can send a`Projection ` to any `INPUT` Node, and can receive a Projection from any `OUTPUT` -Node within the nested Composition. Similarly, a `ControlMechanism` within the outer Composition can modulate the -parameter of any `Mechanism ` within the nested Composition. +Composition is `executed `, the nested Composition is executed when its Node in the outer +is called to execute by the outer Composition's `scheduler `. Any depth of nesting of +Compositions within others is allowed.
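For illustration, a minimal sketch of nesting one Composition inside another is shown below; the Mechanism names and the use of the ``pathways`` constructor argument are assumptions for this example::

    import psyneulink as pnl

    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B')
    C = pnl.ProcessingMechanism(name='C')

    inner = pnl.Composition(pathways=[[B]], name='inner')            # nested Composition
    outer = pnl.Composition(pathways=[[A, inner, C]], name='outer')  # inner is a Node of outer

    # inner executes whenever its Node in outer is scheduled to execute
    outer.run(inputs={A: [[1.0]]})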
+ +*Projections to Nodes in a nested Composition.* Any Node within an outer Composition can send a `Projection +` to any `INPUT ` Node of any Composition that is enclosed within it (i.e., at any level +of nesting). In addition, a `ControlMechanism` within an outer Composition can modulate the parameter (i.e., +send a `ControlProjection` to the `ParameterPort`) of *any* `Mechanism ` in a Composition nested within it, +not just its `INPUT ` Nodes. + +*Projections from Nodes in a nested Composition.* The nodes of an outer Composition can also *receive* Projections +from Nodes within a nested Composition. This is true for any `OUTPUT ` of the nested Composition, +and it is also true for any of its other Nodes if `allow_probes ` is True (the default); +if it is *CONTROL*, then only the `controller ` of a Composition can receive Projections +from Nodes in a nested Composition that are not `OUTPUT ` Nodes. + + .. _Composition_Probes: + +* *Probes* -- Nodes that are not `OUTPUT ` of a nested Composition but project to ones in an + outer Composition are assigned `PROBE ` in addition to their other `roles ` in the + nested Composition. The only difference between `PROBE ` and `OUTPUT ` Nodes + is whether their output is included in the `output_values ` and `results + ` attributes of the outermost Composition to which they project; this is determined by the + `include_probes_in_output ` attribute of the latter. If + `include_probes_in_output ` is False (the default), then the output of any + `PROBE ` Nodes in any Composition nested within it are *not* included in + the `output_values ` or `results ` for the Composition to which + they project. In this respect, they can be thought of as "probing" - that is, providing access to "latent variables" + of -- the Composition to which they belong -- the values of which that are not otherwise reported as part of the + Composition's output or results. If `include_probes_in_output ` is True, + then any `PROBE ` Nodes of any nested Compositions are treated the same as `OUTPUT ` + Nodes: their outputs are included in the `output_values ` and `results + ` of that Composition. + + .. note:: + The specification of `include_probes_in_output ` only applies to a + Composition that is not nested in another. At present, specification of the attribute for nested + Compositions is not supported: the **include_probes_in_output** argument in the constructor + for nested Compositions is ignored, and the attribute is automatically set to True. + + .. technical_note:: + This is because Compositions require access to the values of all of the output_CIM of any Compositions + nested within them (see `below `). .. _Composition_Nested_External_Input_Ports: -If a nested Composition is an `INPUT` Node of the outermost Composition then, when the latter is `executed -`, the `inputs specified ` to its `execution method -` must include the InputPorts of the nested Composition. These can be accessed -using the Composition's `exernal_input_ports ` attribute. -A nested Composition can also contain one or more `learning Pathways `, -however a learning Pathway may not extend from an enclosing Composition to one nested within it or vice versa. The -learning Pathways within a nested Composition are executed when that Composition is run, just like any other (see -`Composition_Learning_Execution`). Any depth of nesting of Compositions within others is allowed. +*Inputs for nested Compositions*. 
If a nested Composition is an `INPUT` Node of all of the Compositions within +which it is nested, including the outermost one, then when the latter is `executed `, +the `inputs specified ` to its `execution method ` must +include the InputPorts of the nested Composition. These can be accessed using the Composition's `exernal_input_ports +` attribute. + +.. _Composition_Nested_Results: + +*Results from nested Compositions.* If a nested Composition is an `OUTPUT` Node of all of the Compositions within +which it is nested, including the outermost one, then when the latter is `executed `, +both the `output_values ` and `results ` of the nested Composition +are also included in those attributes of any intervening and the outermost Composition. If `allow_probes +` is set, then the Composition's `include_probes_in_output +` attribute determines whether their values are also included in the +`output_values ` and `results ` of the outermost Composition +(see `above `). + +.. _Composition_Nested_Learning: + +*Learning in nested Compositions.* A nested Composition can also contain one or more `learning Pathways +`, however a learning Pathway may not extend from an enclosing Composition +to one nested within it or vice versa. The learning Pathways within a nested Composition are executed +when that Composition is run, just like any other (see `Composition_Learning_Execution`). + + +.. _Composition_CIMs: + +*CompositionInterfaceMechanisms* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Every Composition has three `CompositionInterfaceMechanisms `, described below, +that act as interfaces between it and the environment, or other Components if it is `nested ` +within another Composition. The CompositionInterfaceMechanisms of a Composition are created and assigned to it +automatically when the Composition is constructed, and executed automatically when it executes (they should never +be constructed or executed on their own). + +.. _Composition_input_CIM: + +* `input_CIM ` - this is assigned an `InputPort` and `OutputPort` for every `INPUT + ` `Node ` of the Composition to which it belongs. The InputPorts receive input + from either the environment or a Composition within which it is nested. If the Composition is itself an + `INPUT ` Node of an enclosing Composition, then its input must be included in the `inputs + ` to that Composition when it is `executed `. Every InputPort + of an input_CIM is associated with an OutputPort that projects to a corresponding `INPUT ` Node + of the Composition. + +.. _Composition_parameter_CIM: + +* `parameter_CIM ` - this is assigned an `InputPort` and `OutputPort` for every + `Parameter` of every `Node ` of the Composition that is `modulated ` + by a `ModulatoryMechanism` (usually a `ControlMechanism`) outside of the Composition (i.e., from an enclosing + Composition within which it is `nested `). The InputPort receives a Projection from a + `ModulatorySignal` on the ModulatoryMechanism, and the paired OutputPort of the parameter_CIM conveys this via + ModulatoryProjection to the `ParameterPort` for the Paremeter of the Mechanism to be modulated. + + .. technical_note:: + The Projection from a ModulatoryMechanism to the InputPort of a parameter_CIM is the only instance in which a + MappingProjection is used as an `efferent projection ` of a ModulatoryMechanism. + +.. _Composition_output_CIM: + +* `output_CIM ` - this is assigned an `InputPort` and `OutputPort` for every `OUTPUT + ` `Node ` of the Composition to which it belongs. 
Each InputPort receives input + from an `OUTPUT ` Node of the Composition, and its `value ` is assigned as the + `value ` of a corresponding OutputPort. The latter are assigned to the `output_values + ` and `results ` attributes of the Composition. If the Composition + is `nested ` within another, then the output_CIM's `output_ports ` + send Projections to Components of the Composition within which it is nested. If it is an `OUTPUT ` + Node of the enclosing Composition, then its OutputPorts project the `output_CIM ` of the + enclosing Composition, its `output_values ` are included in those of the enclosing + Composition. If the Composition has an `PROBE ` Nodes, then they too project to the Composition's + output_CIM. If the Composition is nested in another, then the `values ` of the `PROBE + ` Nodes are also included in the Composition's `output_values `; if it + is an outer Composition (i.e. not nested in any other), then the Compositions' `include_probes_in_output + ` attribute determines whether their values are included in its `output_values + ` and `results ` attributes (see `Probes ` for + additional details). + .. _Composition_Projections: @@ -341,7 +458,8 @@ Second, they can be the receiver of a Projection, as in the case of a MappingProjection that receives a `LearningProjection` used to modify its `matrix ` parameter. Nevertheless, since they define the connections and therefore dependencies among the Composition's Nodes, they determine the structure of its -graph. +graph. Subsets of Nodes connected by Projections are often defined as a `Pathway ` as decribed under +`Composition_Pathways` below). .. _Composition_Graph_Projection_Vertices: .. technical_note:: @@ -352,17 +470,29 @@ Although individual Projections are directed, pairs of Nodes can be connected with Projections in each direction (forming a local `cycle `), and the `AutoAssociativeProjection` class of Projection can even connect a Node with itself. Projections can also connect the Node(s) of a Composition to one(s) `nested within it -`. +`. In general, these are to the `INPUT ` Nodes and from the `OUTPUT +` Nodes of a `nested Composition `, but if the Composition's `allow_probes +` attribute is not False, then Projections can be received from any Nodes within a nested +Composition (see `Probes ` for additional details). A ControlMechanism can also control (i.e., +send a `ControlProjection`) to any Node within a nested Composition. -.. _Composition_Projections_to_CIMs: .. technical_note:: - Although Projections can be specified to and from Nodes within a nested Composition, these are actually implemented - as Projections to or from the nested Composition's `input_CIM `,`parameter_CIM - ` or `output_CIM `, respectively; those, in turn, send or receive - Projections to the specified Nodes within the nested Composition. - -Subsets of Nodes connected by Projections are often defined as a `Pathway `, as decribed in the next section. + .. _Composition_Projections_to_CIMs: + + Although Projections can be specified to and from Nodes within a nested Composition, these are actually + implemented as Projections to or from the nested Composition's `input_CIM `, + `parameter_CIM ` or `output_CIM `, respectively; + those, in turn, send or receive Projections to or from the specified Nodes within the nested Composition. 
+ `PROBE ` Nodes of a nested Composition, like `OUTPUT ` Nodes, + project to the Node of an enclosing Composition via the nested Composition's `output_CIM + `, and those of any intervening Compositions if it is nested more than one level deep. + The outputs of `PROBE ` Nodes are included in the `output_values ` and + `results ` of such intervening Compositions (since those values are derived from the + `output_ports ` of the Composition's `output_CIM `. + Specifying `include_probes_in_output ` has no effect on this behavior + for intervening Compositions; it only applies to the outermost Composition to which a PROBE Node projects + (see `Probes ` for additional details). .. _Composition_Pathways: @@ -377,7 +507,7 @@ continguous, overlapping, intersecting, or disjoint, and can have one degree of converging and/or diverging branches (meaning that their branches can't branch). Each Pathway has a name (that can be assigned when it is constructed) and a set of attributes, including a `pathway ` attribute that lists the Nodes and Projections in the -Pathway, a `roles ` attribute that lists the `PathwayRoles ` assigned to it (based on +Pathway, a `roles ` attribute that lists the `PathwayRoles ` assigned to it (based on the `NodeRoles ` assigned to its Nodes), and attributes for particular types of nodes (e.g., `INPUT` and `OUTPUT`) if the Pathway includes nodes assigned the corresponding `NodeRoles `. If a Pathway does not have a particular type of Node, then its attribute returns None. There are @@ -402,10 +532,10 @@ - `Composition_Controller_Execution` -A Composition can be assigned a `controller `. This is a `ControlMechanism`, or a subclass of -one, that modulates the parameters of Components within the Composition (including Components of nested Compositions). -It typically does this based on the output of an `ObjectiveMechanism` that evaluates the value of other Mechanisms in -the Composition, and provides the result to the `controller `. +A Composition can be assigned a `controller `. This must be an `OptimizationControlMechanism`, +or a subclass of one, that modulates the parameters of Components within the Composition (including Components of +nested Compositions). It typically does this based on the output of an `ObjectiveMechanism` that evaluates the value +of other Mechanisms in the Composition, and provides the result to the `controller `. .. _Composition_Controller_Assignment: @@ -425,9 +555,8 @@ IF A DEFAULT OBJ MECH IS CREATED, OR NEITHER OBJ_MECH NOR OCM HAVE MONITOR FOR CONTROL SPECIFIED, THEN PRIMARY OUTPUTPORT OF ALL OUTPUT NODES OF COMP ARE USED (MODULO SPEC ON INDIVIDUAL MECHS) - -Specyfing Parameters to Control -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Specifying Parameters to Control +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A controller can also be specified for the System, in the **controller** argument of the `System`. This can be an existing `ControlMechanism`, a constructor for one, or a class of ControlMechanism in which case a default instance of that class will be created. If an existing ControlMechanism or the constructor for one is used, then @@ -480,16 +609,22 @@ ~~~~~~~~~~~~~~~~~~~~~~ The `controller ` is executed only if the Composition's `enable_controller -` attribute is True. This generally done automatically when the `controller -` is `assigned `. 
If enabled, the `controller -` is generally executed either before or after all of the other Components in the Composition -have been executed within a given `TimeScale `, as determined by the Composition's -`controller_time_scale` and `controller_mode ` attributes. The Composition's -`controller_condition ` attribute can be used to further customize when it is -executed. All three of these attributes can be specified in corresponding arguments of the -Composition's constructor, or programmatically after it is constructed by assigning the desired value to the -corresponding attribute. - +` attribute is True. This is generally done automatically when the controller +is `assigned `. If `enabled `, the controller is +executed either before or after all of the other Components in the Composition have been executed at a given +`TimeScale`, and if its specified `Condition ` has been met, as determined by the +Composition's `controller_mode `, `controller_time_scale +` and `controller_condition ` attributes. By +default, a controller is enabled, and executes after the rest of the Composition (`controller_mode +`\\= *AFTER*) at the end of every trial (`controller_time_scale +`\\= `TimeScale.TRIAL` and `controller_condition ` += `Always()`). However, `controller_mode ` can be used to specify execution of the +controller before the Composition; `controller_time_scale ` can be used to specify +execution at a particular `TimeScale` (that is, at the beginning or end of every `TIME_STEP `, +`PASS `, or `RUN `); and `controller_condition ` can +be used to specify a particular `Condition` that must be satisfied for the controller to execute. Arguments for all +three of these attributes can be specified in the Composition's constructor, or programmatically after it is +constructed by assigning the desired value to the corresponding attribute. .. _Composition_Learning: @@ -703,7 +838,6 @@ .. figure:: _static/Composition_Multilayer_Learning_fig.svg :alt: Schematic of LearningMechanism and LearningProjections in a Process - :scale: 50 % *Components for supervised learning Pathway*: the Pathway has three Mechanisms generated by a call to a `supervised learning method ` (e.g., ``add_backpropagation_learning_pathway(pathway=[A,B,C])``), @@ -741,7 +875,6 @@ .. figure:: _static/Composition_Learning_OUTPUT_vs_TERMINAL_fig.svg :alt: Schematic of Mechanisms and Projections involved in learning - :scale: 50 % Configuration of Components generated by the creation of two intersecting `learning Pathways ` (e.g., ``add_backpropagation_learning_pathway(pathway=[A,B])`` and @@ -777,7 +910,6 @@ .. figure:: _images/Composition_XOR_animation.gif :alt: Animation of Composition with learning - :scale: 50 % Animation of XOR Composition in example above when it is executed by calling its `learn ` method with the argument ``animate={'show_learning':True}``. @@ -835,10 +967,10 @@ `, in which case it is automatically wrapped as `UserDefinedFunction`. For example, the `forward and backward methods `_ of a PyTorch object can be assigned in this way. The advanatage of this approach is that it can be applied to any Python function that adheres to the requirements -of a `UserDefinedFunction`. It must be carefully coordinated with the execution of other learning-related Components in the Composition, to insure -that each function is called at the appropriate times during execution.
Furthermore, as with an `AutodiffComposition`, -the internal constituents of the object (e.g., intermediates layers of a neural network model) are not accessible to -other Components in the Composition (e.g., as a source of information or for modulation). +of a `UserDefinedFunction`. It must be carefully coordinated with the execution of other learning-related Components in +the Composition, to insure that each function is called at the appropriate times during execution. Furthermore, as +with an `AutodiffComposition`, the internal constituents of the object (e.g., intermediates layers of a neural network +model) are not accessible to other Components in the Composition (e.g., as a source of information or for modulation). .. _Composition_Execution: @@ -846,12 +978,16 @@ Executing a Composition ----------------------- + - `Execution Methods ` - `Composition_Execution_Inputs` - - `Composition_Runtime_Params` - - `Composition_Cycles_and_Feedback` - - `Composition_Execution_Context` - - `Composition_Reset` - - `Composition_Compilation` + - `Composition_Execution_Factors` + • `Composition_Runtime_Params` + • `Composition_Cycles_and_Feedback` + • `Composition_Execution_Context` + • `Composition_Timing` + • `Composition_Reset` + • `Composition_Compilation` + - `Results, Reporting and Logging ` .. _Composition_Execution_Methods: @@ -880,50 +1016,14 @@ then `learn ` is called; otherwise, `run ` is called. In either case, the return value of the corresponding method is returned. -.. _Composition_Execution_Reporting: - -*Results, Reporting and Logging*. Executing a Composition returns the results of its last `TRIAL ` of -execution. If either `run ` or `learn ` is called, the results of all `TRIALS -` executed are available in the Composition's `results ` attribute (see `Results -` for additional details). A report of the results of each `TRIAL ` -can also be generated as the Compostion is executing, using the **report_output** and **report_progress** arguments -of any of the execution methods. **report_output** (specified using `ReportOutput` options) generates a report of the -input and output of the Composition and its `Nodes `, and optionally their `Parameters` (specified -in the **report_params** arg using `ReportParams` options); **report_progress** (specified using `ReportProgress` -options) shows a progress bar indicating how many `TRIALS ` have been executed and an estimate of -the time remaining to completion. These options are all OFF by default (see `Report` for additional details). -The values of individual Components (and their `parameters `) assigned during execution can also be -recorded in their `log ` attribute using the `Log` facility. - -*Inputs*. All methods of executing a Composition require specification of an **inputs** argument, which designates -the values assigned to the `INPUT` `Nodes ` of the Composition for each `TRIAL `. -A `TRIAL ` is defined as the opportunity for every Node in the Composition to execute for a given -set of inputs. The inputs for each `TRIAL ` can be specified using an `input dictionary -`; for the `run ` and `learn ` methods, they -can also be specified `programmatically ` (see `Composition_Execution_Inputs`). The -same number of inputs must be specified for every `INPUT` Node, unless only one value is specified for a Node (in -which case that value is provided as the input to that Node for all `TRIAL `\\s executed). 
If the -**inputs** argument is not specified for the `run ` or `execute ` methods, -the `default_variable ` for each `INPUT` Node is used as its input on `TRIAL `. -If it is not specified for the `learn ` method, an error is generated unless its **targets** -argument is specified (see `below `). - - -.. _Composition_Execution_Results: - -*Results*. At the end of a `TRIAL ` (a list of -the `output_values ` for all of its `OUTPUT` Nodes) are added to the Composition's -`results ` attribute, and the `output_values ` for the last `TRIAL -` executed is returned by the `execution method `. - .. _Composition_Execution_Num_Trials: *Number of trials*. If the the `execute ` method is used, a single `TRIAL ` is executed; if the **inputs** specifies more than one `TRIAL `\\s worth of input, an error is generated. For the `run ` and `learn `, the **num_trials** argument can be used to specify -the number of `TRIAL `\\s to execute; if its value execeeds the number of inputs provided for each -Node in the **inputs** argument, then the inputs are recycled from the beginning of the lists, until the number of -`TRIAL `\\s specified in **num_trials** has been executed. If **num_trials** is not specified, +an exact number of `TRIAL `\\s to execute; if its value execeeds the number of inputs provided for +each Node in the **inputs** argument, then the inputs are recycled from the beginning of the lists, until the number +of `TRIAL `\\s specified in **num_trials** has been executed. If **num_trials** is not specified, then a number of `TRIAL `\\s is executed equal to the number of inputs provided for each `Node ` in **inputs** argument. @@ -933,21 +1033,46 @@ its `learn ` method must be used in place of the `run ` method, and its `disable_learning ` attribute must be False (the default). A set of targets must also be specified (see `below `). The `run ` and `execute ` -methods can also be used to execute the Composition, but no learning will occur, irrespective of the value of the -`disable_learning ` attribute. +methods can also be used to execute a Composition that has been `configured for learning `, +but no learning will occur, irrespective of the value of the `disable_learning ` +attribute. + +The sections that follow describe the formats that can be used for inputs, factors that impact execution, and +how the results of execution are recorded and reported. + .. _Composition_Execution_Inputs: -*Input formats (including targets for learning)* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*Composition Inputs* +~~~~~~~~~~~~~~~~~~~~ + - `Composition_Input_Dictionary` - `Composition_Programmatic_Inputs` -The **inputs** argument of the Composition's `execution methods ` (and, for learning, -the **targets** argument of the `learn ` method) is used to specify the inputs to the Composition -for each `TRIAL `. These are provided to the Composition's `INPUT` `Nodes ` -(including its `TARGET_MECHANISMS ` for learning) each time it is executed. There are -two ways to specify inputs: +All `methods of executing a Composition require specification of an **inputs** +argument (and a **targets** argument for `learn ` method), which designates the values assigned +to the `INPUT ` `(and, for learning, the `TARGET `) Nodes ` +of the Composition. These are provided to the Composition each time it is executed; that is, for each `TRIAL +`. A `TRIAL ` is defined as the opportunity for every Node in the Composition +to execute the current set of inputs. 
The inputs for each `TRIAL ` can be specified using an `input +dictionary `; for the `run ` and `learn ` methods, +they can also be specified `programmatically `. Irrespective of format, the same +number of inputs must be specified for every `INPUT` Node, unless only one value is specified for a Node (in which +case that value is provided as the input to that Node for every `TRIAL `\\s executed). If the +**inputs** argument is not specified for the `run ` or `execute ` methods, +the `default_variable ` for each `INPUT` Node is used as its input on `TRIAL `. +If it is not specified for the `learn ` method, an error is generated unless its **targets** +argument is specified (see `below `). The Composition's `get_input_format() +` method can be used to show a template for how inputs should be formatted for the +Composition, as well as the `INPUT ` Nodes to which they are assigned. The formats are described in +more detail below. + +.. _Composition_Input_Formats: + +*Input formats (including targets for learning)* +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There are two ways to specify inputs: * using `a dictionary `, in which the inputs are specified or each `TRIAL ` explicitly; @@ -965,7 +1090,8 @@ in the `external_input_ports ` attribute of the Composition's `INPUT` `Mechanisms `, and the corresponding attribute (`external_input_ports `) of any `nested Composition ` that is an `INPUT` Node of the Composition being executed -(see `above `) +(see `above `). The format required can also be seen using the +`get_input_format() ` method. .. note:: Most Mechanisms have only a single `InputPort`, and thus require only a single input to be specified for them @@ -978,15 +1104,15 @@ of a `nested Composition `, based on the Mechanisms (and/or additionally nested Compositions) that comprise its set of `INPUT` `Nodes `. -These factors determine the format of each entry in an `inputs dictionary `, or the -return value of the function or generator used for `programmatic specification ` -of inputs, as described in detail below (also see `examples `). +The factors above determine the format of each entry in an `inputs dictionary `, or the +return value of the function or generator used for `programmatic specification ` of +inputs, as described in detail below (also see `examples `). .. _Composition_Input_Dictionary: -Input Dictionary -^^^^^^^^^^^^^^^^ +*Input Dictionary* +================== The simplest way to specificy inputs (including targets for learning) is using a dict, in which each entry specifies the inputs to a given `INPUT` `Node `. The key of each entry is a Node, and the value is a list of @@ -1009,14 +1135,23 @@ input, and for which only one input is specified (``[1.0]``), which is therefore provided as the input to Mechanism ``c`` on every `TRIAL `. -Each input value must be compatible with the number of `InputPorts ` that receive external input for -that Node. These are listed in its ``external_input_ports`` attribute (`here ` -if it is Mechanism, or `here ` if it is a Composition). More specifically, the -shape of the input value must be compatible with the shape of the Node's `external_input_values` attribute (`here -` if it is Mechanism, or `here ` if it is -a Composition). While these are always 2d arrays, the number and size of the items (corresponding to each InputPort) -may vary; in some case shorthand notations are allowed, as illustrated in the `examples -` below. 
+The input specified for each `Node ` must be compatible with the number of `InputPorts ` +that receive external input for that Node. These are listed in its ``external_input_ports`` attribute (`here +` if it is Mechanism, or `here ` if it is a +Composition). More specifically, the shape of the input value must be compatible with the shape of the Node's +`external_input_values` attribute (`here ` if it is Mechanism, +or `here ` if it is a Composition). While these are always 2d arrays, the number +and size of the items (corresponding to each InputPort) may vary; in some case shorthand notations are allowed, +as illustrated in the `examples ` below. + +.. _Composition_Input_Labels: + +In general, the value of inputs should be numeric arrays; however, some Mechanisms have an `input_labels_dict +` that specifies a mapping from strings (labels) to numeric values, in which those +strings can be used to specify inputs to that Mechanism (these are translated to their numeric values on execution). +However, such labels are specific to a given Mechanism; use of strings as input to a Mechanism that does not have an +`input_labels_dict ` specified, or use of a string that is not listed in the +dictionary for that Mechanism generates and error. .. _Composition_Target_Inputs: @@ -1029,10 +1164,13 @@ `, or the corresponding `TARGET_MECHANISM `. The value of each entry specifies the inputs for each trial, formatted asdescribed `above `. +The input format required for a Composition, and the `INPUT ` Nodes to which inputs are assigned, +can be seen using its `get_input_format ` method. + .. _Composition_Programmatic_Inputs: -Specifying Inputs Programmatically -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +*Specifying Inputs Programmatically* +==================================== Inputs can also be specified programmticaly, in a `TRIAL ` by `TRIAL ` manner, using a function, generator, or generator function. @@ -1175,10 +1313,23 @@ def input_function(env, result): comp.run(inputs=input_dictionary) COMMENT + +.. _Composition_Execution_Factors: + +*Execution Factors* +~~~~~~~~~~~~~~~~~~~ + + • `Composition_Runtime_Params` + • `Composition_Cycles_and_Feedback` + • `Composition_Execution_Context` + • `Composition_Timing` + • `Composition_Reset` + • `Composition_Compilation` + .. _Composition_Runtime_Params: *Runtime Parameters* -~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^ COMMENT: 5/8/20 @@ -1215,7 +1366,7 @@ def input_function(env, result): - *value* - (, `Condition`), , or subdictionary (see below) `Condition` specifies when the value is applied; otherwise, its previously assigned value or `default ` is used; if the parameter values appears alone in a tuple or outside of one, - then the Condtion `Always` is applied. + then the Condition `Always()` is applied. See `Runtime Parameter Specification Dictionary ` for additional details. @@ -1230,7 +1381,7 @@ def input_function(env, result): .. _Composition_Cycles_and_Feedback: *Cycles and Feedback* -~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^ * `Composition_Cycle` * `Composition_Feedback` @@ -1251,8 +1402,9 @@ def input_function(env, result): .. _Composition_Cycle: -Cycles and synchronous execution -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +*Cycles and Synchronous Execution* +================================== + .. _Composition_Cycle_Structure: @@ -1317,8 +1469,8 @@ def input_function(env, result): .. _Composition_Feedback: -Feedback and sequential execution -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +*Feedback and Sequential Execution* +=================================== .. 
_Composition_Feedback_Designation: @@ -1338,14 +1490,14 @@ def input_function(env, result): or the keyword *FEEDBACK* forces its assignment as a *feedback* Projection, whereas False precludes it from being assigned as a feedback Projection (e.g., a `ControlProjection` that otherwise forms a cycle will no longer do so). -.. warning:: - Designating a Projection as **feeedback** that is *not* in a loop is allowed, but will issue a warning and - can produce unexpected results. Designating more than one Projection as **feedback** within a loop is also - permitted, by can also lead to complex and unexpected results. In both cases, the `FEEDBACK_RECEIVER` for any - Projection designated as **feedback** will receive a value from the Projection that is based either on the - `FEEDBACK_SENDER`\\'s initial_value (the first time it is executed) or its previous `value ` - (in subsequent executions), rather than its most recently computed `value ` whether or not it - is in a `cycle ` (see `below `). + .. warning:: + Designating a Projection as **feedback** that is *not* in a loop is allowed, but will issue a warning and + can produce unexpected results. Designating more than one Projection as **feedback** within a loop is also + permitted, but can also lead to complex and unexpected results. In both cases, the `FEEDBACK_RECEIVER` for any + Projection designated as **feedback** will receive a value from the Projection that is based either on the + `FEEDBACK_SENDER`\\'s initial_value (the first time it is executed) or its previous `value ` + (in subsequent executions), rather than its most recently computed `value ` whether or not it + is in a `cycle ` (see `below `). .. _Composition_Feedback_Sequential_Execution: @@ -1375,7 +1527,7 @@ def input_function(env, result): .. _Composition_Execution_Context: *Execution Contexts* -~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^ A Composition is always executed in a designated *execution context*, specified by an `execution_id ` that can be provided to the **context** argument of the method used to execute the @@ -1413,30 +1565,30 @@ def input_function(env, result): See `Composition_Examples_Execution_Context` for examples. -COMMENT: -For Developers +.. technical_note:: + + .. _Composition_Execution_Contexts_Init: -.. _Composition_Execution_Contexts_Init: + **Initialization of Execution Contexts** -Initialization of Execution Contexts -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + - The parameter values for any execution context can be copied into another execution context by using + Component._initialize_from_context, which when called on a Component copies the values for all its parameters + and recursively for all of the Component's `_dependent_components `. -- The parameter values for any execution context can be copied into another execution context by using \ -Component._initialize_from_context, which when called on a Component copies the values for all its parameters \ -and recursively for all of the Component's `_dependent_components ` + - `_dependent_components ` should be added to for any new Component that requires + other Components to function properly (beyond "standard" things like Component.function, or Mechanism.input_ports, + as these are added in the proper classes' _dependent_components).
-- `_dependent_components ` should be added to for any new Component that requires \ -other Components to function properly (beyond "standard" things like Component.function, \ -or Mechanism.input_ports, as these are added in the proper classes' _dependent_components) - - the intent is that with ``_dependent_components`` set properly, calling \ - ``obj._initialize_from_context(new_context, base_context)`` should be sufficient to run obj \ - under **new_context** - - a good example of a "nonstandard" override is `OptimizationControlMechanism._dependent_components` + - The intent is that with `_dependent_components ` set properly, calling + ``obj._initialize_from_context(new_context, base_context)`` should be sufficient to run obj under + **new_context**. + + - A good example of a "nonstandard" override is `OptimizationControlMechanism._dependent_components` .. _Composition_Timing: *Timing* -~~~~~~~~ +^^^^^^^^ When `run ` is called by a Composition, it calls that Composition's `execute ` method once for each `input ` (or set of inputs) specified in the call to `run @@ -1446,16 +1598,14 @@ def input_function(env, result): `termination condition ` is met. The `scheduler ` can be used in combination with `Condition` specifications for individual Components to execute different Components at different time scales. -COMMENT .. _Composition_Reset: -*Resetting Parameters of stateful* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*Resetting Stateful Parameters* +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ COMMENT: - MOVE TO IntegratorMechanism and relevant arguments of run method - DIFERENT FROM INTEGRATOR REINITIALIZATION, WHICH USES/HAS: - reset_integrator_nodes_to: DICT of {node: value} paris @@ -1472,10 +1622,9 @@ def input_function(env, result): - reset_integrator_when attribute of IntegratorMechanism: SPECIFIES WHEN INTEGRATOR IS RESET UNLESS IT IS NODE INCLUDED IN reset_integrator_nodes_when ARG OF RUN - previous_integrator_value attribute (INTERGRATOR) vs. previous value of value attribute (CYCLE) - COMMENT -`stateful ` (such as `IntegratorFunctions ` and "non-parametric" +`Stateful Functions ` (such as `IntegratorFunctions ` and "non-parametric" `MemoryFunctions `) have a `previous_value ` attribute that maintains a record of the Function's `values ` for each `execution context ` in which it is executed, within and between calls to the Composition's `execute methods `. @@ -1544,26 +1693,30 @@ def input_function(env, result): .. _Composition_Compilation: *Compilation* -~~~~~~~~~~~~~ +^^^^^^^^^^^^^ By default, a Composition is executed using the Python interpreter used to run the script from which it is called. In -many cases, a Composition can also be executed in a compiled mode. While this can add some time to initiate execution, -execution itself can be several orders of magnitude faster than using the Python interpreter. Thus, using a compiled -mode can be useful for executing Compositions that are complex and/or for large numbers of `TRIAL `\\s. -Compilation is supported for most CPUs (including x86, arm64, and powerpc64le). Several modes can be specified, that -that tradeoff power (i.e., degree of speed-up) against level of support (i.e., likelihood of success). Most PsyNeuLink -`Components ` and methods are supported for compilation; however, Python native functions and methods -(e.g., used to specify the `function ` of a Component) are not supported at present. Users who wish -to compile custom functions should refer to `compiled User Defined Functions ` for more -information. 
Users are strongly urged to report any other compilation failures to -psyneulinkhelp@princeton.edu, or as an issue `here `_. -Known failure conditions are listed `here `_. - -.. warning:: - Compiled modes are continuing to be developed and refined, and therefore it is still possible that there are - bugs that will not cause compilation to fail, but could produce erroneous results. Therefore, it is strongly - advised that if compilation is used, suitable tests are conducted that the results generated are identical to - those generated when the Composition is executed using the Python interpreter. +many cases, a Composition can also be executed in a `compiled mode `. While this can add some time to +initiate execution, execution itself can be several orders of magnitude faster than using the Python interpreter. Thus, +using a compiled mode can be useful for executing Compositions that are complex and/or for large numbers of `TRIAL +`\\s. `Compilation` is supported for most CPUs (including x86, arm64, and powerpc64le). Several modes +can be specified that trade off power (i.e., degree of speed-up) against level of support (i.e., likelihood of +success). Most PsyNeuLink `Components ` and methods are supported for compilation; however, Python native +functions and methods (e.g., used to specify the `function ` of a Component) are not supported at +present. Users who wish to compile custom functions should refer to `compiled User Defined Functions +` for more information. See below and `Compilation` for additional details regarding the use +of compiled modes of execution, and `Vesely et al. (2022) `_ +for more information about the approach taken to compilation. + + .. warning:: + Compiled modes are continuing to be developed and refined, and therefore it is still possible that there are + bugs that will not cause compilation to fail, but could produce erroneous results. Therefore, it is strongly + advised that if compilation is used, suitable tests are conducted that the results generated are identical to + those generated when the Composition is executed using the Python interpreter. + + Users are strongly urged to report any compilation failures to psyneulinkhelp@princeton.edu, or as an + issue `here `_. Known failure conditions are listed + `here `_. .. _Composition_Compiled_Modes: @@ -1610,21 +1763,59 @@ def input_function(env, result): `this `_ for progress extending support of parallization in compiled modes). -COMMENT: + +.. _Composition_Execution_Results_and_Reporting: + +*Results, Reporting and Logging* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _Composition_Execution_Results: + +*Results* + +Executing a Composition returns the results of the last `TRIAL ` executed. If either `run +` or `learn ` is called, the results of all `TRIALS ` executed +are available in the Composition's `results ` attribute. More specifically, at the end of a +`TRIAL `, its results (a list of the `output_values +` for all of its `OUTPUT ` `Nodes `) are added to +the Composition's `results ` attribute, and the `output_values ` for the +last `TRIAL ` executed are returned by the `execution method `. The +`output_values ` of the last `TRIAL ` for each `OUTPUT ` Node +can be seen using the Composition's `get_results_by_nodes ` method. + +.. _Composition_Execution_Reporting: + +*Reporting* + +A report of the results of each `TRIAL ` can be generated as the Composition is executing, using the +**report_output** and **report_progress** arguments of any of the `execution methods `, as +sketched briefly below.
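Before the details of these options (described below), a minimal hypothetical sketch of how they might be passed to `run ` (the Mechanism and values are illustrative assumptions)::

    >>> import psyneulink as pnl
    >>> t = pnl.TransferMechanism(name='t')
    >>> comp = pnl.Composition(nodes=[t])
    >>> comp.run(inputs={t: [[1.0], [2.0]]},
    ...          report_output=pnl.ReportOutput.FULL,     # report input/output of each Node per TRIAL
    ...          report_progress=pnl.ReportProgress.ON)   # show a progress bar while running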
+**report_output** (specified using `ReportOutput` options) generates a report of the input and output of the +Composition and its `Nodes `, and optionally their `Parameters` (specified in the +**report_params** arg using `ReportParams` options); **report_progress** (specified using `ReportProgress` options) +shows a progress bar indicating how many `TRIALS ` have been executed and an estimate of the time +remaining to completion. These options are all OFF by default (see `Report` for additional details). + +.. _Composition_Execution_Logging: + +*Logging* + +The values of individual Components (and their `parameters `) assigned during execution can also be +recorded in their `log ` attribute using the `Log` facility. + + .. _Composition_Visualization: Visualizing a Composition ------------------------- -XCOMMENTX: +COMMENT: XXX - ADD EXAMPLE OF NESTED COMPOSITION XXX - ADD DISCUSSION OF show_controller AND show_learning -XCOMMENTX +COMMENT The `show_graph ` method generates a display of the graph structure of `Nodes -` and `Projections ` in the Composition (based on the Composition's `graph -`). -COMMENT +` and `Projections ` in the Composition based on the Composition's `graph +` (see `Visualization` for additional details). .. _Composition_Examples: @@ -2104,7 +2295,7 @@ def input_function(env, result): *Runtime Parameters* ~~~~~~~~~~~~~~~~~~~~ -If a runtime parameter is meant to be used throughout the `Run`, then the `Condition` may be omitted and the `Always` +If a runtime parameter is meant to be used throughout the `Run`, then the `Condition` may be omitted and the `Always()` `Condition` will be assigned by default: >>> import psyneulink as pnl @@ -2360,6 +2551,7 @@ def input_function(env, result): import warnings from copy import deepcopy, copy from inspect import isgenerator, isgeneratorfunction +from typing import Union import graph_scheduler import networkx @@ -2370,15 +2562,16 @@ def input_function(env, result): from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import Component, ComponentsMeta -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, PredictionErrorDeltaFunction from psyneulink.core.components.functions.function import is_function_type +from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, \ + PredictionErrorDeltaFunction from psyneulink.core.components.functions.nonstateful.learningfunctions import \ LearningFunction, Reinforcement, BackPropagation, TDLearning from psyneulink.core.components.functions.nonstateful.transferfunctions import Identity from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base, MechanismError, MechanismList from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism -from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import \ - OptimizationControlMechanism, AGENT_REP +from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import AGENT_REP, \ + RANDOMIZATION_CONTROL_SIGNAL from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import \ LearningMechanism, ACTIVATION_INPUT_INDEX, ACTIVATION_OUTPUT_INDEX, ERROR_SIGNAL, ERROR_SIGNAL_INDEX from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base @@ -2394,7 +2587,8 @@ def input_function(env, result): from 
psyneulink.core.components.projections.modulatory.learningprojection import LearningProjection from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection, MappingError -from psyneulink.core.components.projections.projection import ProjectionError, DuplicateProjectionError +from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base +from psyneulink.core.components.projections.projection import Projection_Base, ProjectionError, DuplicateProjectionError from psyneulink.core.components.shellclasses import Composition_Base from psyneulink.core.components.shellclasses import Mechanism, Projection from psyneulink.core.compositions.report import Report, \ @@ -2403,14 +2597,15 @@ def input_function(env, result): from psyneulink.core.compositions.showgraph import ShowGraph, INITIAL_FRAME, SHOW_CIM, EXECUTION_SET, SHOW_CONTROLLER from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ - AFTER, ALL, ANY, BEFORE, COMPONENT, COMPOSITION, CONTROLLER, CONTROL_SIGNAL, DEFAULT, \ - FEEDBACK, HARD_CLAMP, IDENTITY_MATRIX, INPUT, INPUT_PORTS, INPUTS, INPUT_CIM_NAME, LEARNED_PROJECTIONS, \ - LEARNING_FUNCTION, LEARNING_MECHANISM, LEARNING_MECHANISMS, LEARNING_PATHWAY, \ - MATRIX, MATRIX_KEYWORD_VALUES, MAYBE, MODEL_SPEC_ID_COMPOSITION, MODEL_SPEC_ID_NODES, MODEL_SPEC_ID_PROJECTIONS, \ - MODEL_SPEC_ID_PSYNEULINK, \ - MODEL_SPEC_ID_RECEIVER_MECH, MODEL_SPEC_ID_SENDER_MECH, MONITOR, MONITOR_FOR_CONTROL, NAME, NESTED, NO_CLAMP, \ - OBJECTIVE_MECHANISM, ONLINE, OUTCOME, OUTPUT, OUTPUT_CIM_NAME, OUTPUT_MECHANISM, OUTPUT_PORTS, OWNER_VALUE, \ - PARAMETER, PARAMETER_CIM_NAME, PROCESSING_PATHWAY, PROJECTION, PULSE_CLAMP, \ + AFTER, ALL, ALLOW_PROBES, ANY, BEFORE, COMPONENT, COMPOSITION, CONTROL, CONTROL_SIGNAL, CONTROLLER, DEFAULT, \ + FEEDBACK, FUNCTION, HARD_CLAMP, IDENTITY_MATRIX, INPUT, INPUT_PORTS, INPUTS, INPUT_CIM_NAME, \ + LEARNED_PROJECTIONS, LEARNING_FUNCTION, LEARNING_MECHANISM, LEARNING_MECHANISMS, LEARNING_PATHWAY, \ + MATRIX, MATRIX_KEYWORD_VALUES, MAYBE, \ + MODEL_SPEC_ID_COMPOSITION, MODEL_SPEC_ID_NODES, MODEL_SPEC_ID_PROJECTIONS, MODEL_SPEC_ID_PSYNEULINK, \ + MODEL_SPEC_ID_RECEIVER_MECH, MODEL_SPEC_ID_SENDER_MECH, \ + MONITOR, MONITOR_FOR_CONTROL, NAME, NESTED, NO_CLAMP, OBJECTIVE_MECHANISM, ONLINE, OUTCOME, \ + OUTPUT, OUTPUT_CIM_NAME, OUTPUT_MECHANISM, OUTPUT_PORTS, OWNER_VALUE, \ + PARAMETER, PARAMETER_CIM_NAME, PROCESSING_PATHWAY, PROJECTION, PROJECTION_TYPE, PROJECTION_PARAMS, PULSE_CLAMP, \ SAMPLE, SHADOW_INPUTS, SOFT_CLAMP, SSE, \ TARGET, TARGET_MECHANISM, VARIABLE, WEIGHT, OWNER_MECH from psyneulink.core.globals.log import CompositionLog, LogCondition @@ -2419,7 +2614,7 @@ def input_function(env, result): from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, _assign_prefs from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.utilities import \ - ContentAddressableList, call_with_pruned_args, convert_to_list, convert_to_np_array + ContentAddressableList, call_with_pruned_args, convert_to_list, convert_to_np_array, is_numeric from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, Condition, Never from psyneulink.core.scheduling.scheduler import Scheduler, SchedulingMode from psyneulink.core.scheduling.time import Time, TimeScale @@ -2436,7 +2631,6 
@@ def input_function(env, result): 'Composition', 'CompositionError', 'CompositionRegistry', 'EdgeType', 'get_compositions', 'NodeRole' ] - logger = logging.getLogger(__name__) CompositionRegistry = {} @@ -2444,8 +2638,9 @@ def input_function(env, result): class CompositionError(Exception): - def __init__(self, error_value): + def __init__(self, error_value, **kwargs): self.error_value = error_value + self.return_items = kwargs def __str__(self): return repr(self.error_value) @@ -2546,23 +2741,22 @@ def feedback(self, value: EdgeType): class Graph(object): - """ - A Graph of vertices and edges. + """A Graph of vertices and edges. - Attributes - ---------- + Attributes + ---------- - comp_to_vertex : Dict[`Component ` : `Vertex`] - maps `Component` in the graph to the `Vertices ` that represent them. + comp_to_vertex : Dict[`Component ` : `Vertex`] + maps `Component` in the graph to the `Vertices ` that represent them. - vertices : List[Vertex] - the `Vertices ` contained in this Graph; each can be a `Node ` or a - `Projection `. + vertices : List[Vertex] + the `Vertices ` contained in this Graph; each can be a `Node ` or a + `Projection `. - dependency_dict : Dict[`Component` : Set(`Component`)] - maps each of the graph's Components to the others from which it receives input - (i.e., their `value `). For a `Node `, this is one or more - `Projections `; for a Projection, it is a single Node. + dependency_dict : Dict[`Component` : Set(`Component`)] + maps each of the graph's Components to the others from which it receives input + (i.e., their `value `). For a `Node `, this is one or more + `Projections `; for a Projection, it is a single Node. """ @@ -2824,9 +3018,13 @@ class NodeRole(enum.Enum): INPUT A `Node ` that receives input from outside its `Composition`, either from the Composition's `run ` method or, if it is in a `nested Composition `, from the outer - outer Composition. By default, the `ORIGIN` Nodes of a Composition are also its `INPUT` Nodes; however this - can be modified by `assigning specified NodeRoles ` to Nodes. A Composition - can have many `INPUT` Nodes. + Composition. By default, the `ORIGIN` Nodes of a Composition are also its `INPUT` Nodes; however this can be + modified by `assigning specified NodeRoles ` to Nodes. A Composition can + have many `INPUT` Nodes. Note that any Node that `shadows ` an `INPUT` Node is itself + also assigned the role of `INPUT` Node. + + PROBE + A `Node ` that is neither `ORIGIN` nor `TERMINAL` but that is treated as an SINGLETON A `Node ` that is both an `ORIGIN` and a `TERMINAL`. This role cannot be modified @@ -2889,15 +3087,18 @@ class NodeRole(enum.Enum): `; usually a `ComparatorMechanism` (see `OBJECTIVE_MECHANISM`). This role can, but generally should not be modified programmatically. + PROBE + An `INTERNAL` `Node ` that is permitted to have Projections from it to the Composition's + `output_CIM `, but -- unlike an `OUTPUT` Node -- the `output_values + ` of which are *not* included in the Composition's `results + ` attribute (see `allow_probes ` for an + example. + OUTPUT A `Node ` the `output_values ` of which are included in the Composition's `results ` attribute. By default, the `TERMINAL` Nodes of a Composition are also its `OUTPUT` Nodes; however this can be modified by `assigning specified NodeRoles ` to Nodes. A Composition can have many `OUTPUT` Nodes. - COMMENT: - .. 
technical_note:: - TEST - COMMENT TERMINAL A `Node ` that does not send any `Projections ` to any other Nodes within @@ -2924,6 +3125,7 @@ class NodeRole(enum.Enum): LEARNING = enum.auto() TARGET = enum.auto() LEARNING_OBJECTIVE = enum.auto() + PROBE = enum.auto() OUTPUT = enum.auto() TERMINAL = enum.auto() @@ -2934,11 +3136,14 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): pathways=None, \ nodes=None, \ projections=None, \ + allow_probes=True, \ + include_probes_in_output=False \ disable_learning=False, \ controller=None, \ enable_controller=None, \ controller_mode=AFTER, \ - controller_condition=Always, \ + controller_time_scale=TRIAL \ + controller_condition=Always(), \ retain_old_simulation_data=None, \ show_graph_attributes=None, \ name=None, \ @@ -2962,28 +3167,44 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): specifies one or more `Projections ` to add to the Composition; these are not functional unless they are explicitly assigned a `sender ` and `receiver `. + allow_probes : bool : default True + specifies whether `Projections ` are allowed from `Nodes ` of a `nested + Composition ` other than its OUTPUT ` `Nodes ` to + Nodes in outer Composition(s) (see `allow_probes ` for additional information). + + include_probes_in_output : bool : default False + specifies whether the outputs of `PROBE ` Nodes within a `nested Composition + ` are included in the `output_values ` and `results + ` of the Composition to which they project If False, the outputs of `PROBE + ` Nodes *are excluded* from those attributes; if True (the default) they are included + (see `Probes ` for additional details). + disable_learning: bool : default False specifies whether `LearningMechanisms ` in the Composition are executed when run in `learning mode `. - controller : `OptimizationControlmechanism` : default None - specifies the `OptimizationControlMechanism` to use as the Composition's `controller - ` (see `Composition_Controller` for details). + controller : `OptimizationControlMechanism` : default None + specifies the `OptimizationControlMechanism` to use as the `Composition's controller + `. enable_controller: bool : default None specifies whether the Composition's `controller ` is executed when the - Composition is executed. Set to True by default if **controller** specified; if set to False, - the `controller ` is ignored when the Composition is executed. + Composition is run. Set to True by default if **controller** specified (see `enable_controller + ` for additional details). controller_mode: enum.Enum[BEFORE|AFTER] : default AFTER - specifies whether the controller is executed before or after the rest of the Composition - in each run, trial, pass, or time step. Must be either the keyword *BEFORE* or *AFTER*. + specifies whether the `controller ` is executed before or after the rest of the + Composition when it is run, at the `TimeScale` specified by **controller_time_scale**). Must be either the + keyword *BEFORE* or *AFTER* (see `controller_mode ` for additional details). controller_time_scale: TimeScale[TIME_STEP, PASS, TRIAL, RUN] : default TRIAL - specifies with what frequency the the controller should be executed. + specifies the frequency at which the `controller ` is executed, either before or + after the Composition is run as specified by **controller_mode** (see `controller_time_scale + ` for additional details). - controller_condition: Condition : default Always - specifies when the Composition's `controller ` is executed in a trial. 
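As a brief, hypothetical sketch of how `NodeRole`\\s can be inspected and required (the Mechanisms and names here are illustrative assumptions, not taken from the changes above)::

    >>> import psyneulink as pnl
    >>> a = pnl.ProcessingMechanism(name='a')
    >>> b = pnl.ProcessingMechanism(name='b')
    >>> comp = pnl.Composition(pathways=[[a, b]])
    >>> comp.get_nodes_by_role(pnl.NodeRole.INPUT)              # -> [a]
    >>> comp.get_nodes_by_role(pnl.NodeRole.OUTPUT)             # -> [b]
    >>> c = pnl.ProcessingMechanism(name='c')
    >>> comp.add_node(c, required_roles=pnl.NodeRole.OUTPUT)    # require OUTPUT role for c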
+ controller_condition: Condition : default Always() + specifies a specific `Condition` for whether the Composition's `controller ` is + executed in a trial (see `controller_condition ` for additional details). retain_old_simulation_data : bool : default False specifies whether or not to retain Parameter values generated during `simulations @@ -2991,9 +3212,9 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): ` for additional details). show_graph_attributes : dict : None - specifies state_features of how the Composition is displayed when its `show_graph ` method - is called or **animate** is specified in a call to its `run ` method (see `ShowGraph` for - list of attributes and their values). + specifies features of how the Composition is displayed when its `show_graph ` + method is called or **animate** is specified in a call to its `run ` method + (see `ShowGraph` for list of attributes and their values). name : str : default see `name ` specifies the name of the Composition. @@ -3017,6 +3238,25 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): FIX: HOW IS THIS DIFFERENT THAN Composition.nodes? COMMENT + allow_probes : bool or CONTROL + indicates whether `Projections ` are allowed to `Nodes ` in the Composition + from ones of a `nested Composition ` other than its OUTPUT ` `Nodes + `. If *allow_probes* is False, Projections can be received from only the `OUTPUT + ` Nodes of a nested Composition; if it is True (the default), Projections can be received + from any Nodes of a nested Composition, including its `INPUT ` and `INTERNAL + ` Nodes; if it is assigned *CONTROL*, then only the Composition's `controller + ` or its `objective_mechanism ` can receive + Projections from such Nodes. Any Nodes of a nested Composition that project to an enclosing Composition, + other than its `OUTPUT ` Nodes, are assigned `PROBE ` in addition to their + other roles (see `Probes ` for additional information). + + include_probes_in_output : bool : default False + determines whether the outputs of `PROBE ` Nodes within a `nested Composition + ` are included in the `output_values ` and `results + ` of the Composition to which they project. If False, the outputs of `PROBE + ` Nodes *are excluded* from those attributes; if True (the default) they are included + (see `Probes ` for additional details). + required_node_roles : list[(`Mechanism ` or `Composition`, `NodeRole`)] a list of tuples, each containing a `Node ` and a `NodeRole` assigned to it. @@ -3038,16 +3278,36 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): mechanisms : `MechanismList` list of Mechanisms in Composition, that provides access to some of they key attributes. + random_variables : list[Component] + list of Components in Composition with variables that call a randomization function. + + .. technical_note:: + These are Components with a seed `Parameter`. + pathways : ContentAddressableList[`Pathway`] a list of all `Pathways ` in the Composition that were specified in the **pathways** argument of the Composition's constructor and/or one of its `Pathway addition methods - `; each item is a list of nodes (`Mechanisms ` and/or - Compositions) intercolated with the `Projection ` between each pair of nodes. 
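A hypothetical sketch of how the ``allow_probes`` and ``include_probes_in_output`` arguments described above might be used to report the value of an `INTERNAL` Node of a nested Composition (all names, and the way the Projection is specified, are illustrative assumptions)::

    >>> import psyneulink as pnl
    >>> ia = pnl.ProcessingMechanism(name='ia')
    >>> ib = pnl.ProcessingMechanism(name='ib')
    >>> ic = pnl.ProcessingMechanism(name='ic')
    >>> inner = pnl.Composition(pathways=[[ia, ib, ic]], name='inner')   # ib is an INTERNAL Node of inner
    >>> oa = pnl.ProcessingMechanism(name='oa')
    >>> outer = pnl.Composition(nodes=[inner, oa], name='outer',
    ...                         allow_probes=True,               # permit Projections from non-OUTPUT Nodes of inner
    ...                         include_probes_in_output=False)  # exclude probe values from outer's results
    >>> outer.add_projection(pnl.MappingProjection(), sender=ib, receiver=oa)   # ib is assigned as a PROBE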
+ `; each item is a list of `Nodes ` + (`Mechanisms ` and/or Compositions) intercalated with the `Projection(s) ` between each + pair of Nodes; if both Nodes are Mechanisms, then only a single Projection can be specified; if either is a + Composition then, under some circumstances, there can be a set of Projections, specifying how the `INPUT + ` Node(s) of the sender project to the `OUTPUT ` Node(s) of the receiver + (see `add_linear_processing_pathway` for additional details). + + projections : ContentAddressableList[`Projection`] + a list of all of the `Projections ` activated for the Composition; this includes all of + the Projections among `Nodes ` within the Composition, as well as from its `input_CIM + ` to its *INPUT* Nodes; from its `parameter_CIM ` to + the corresponding `ParameterPorts `; from its *OUTPUT* Nodes to its `output_CIM + `; and, if it is `nested ` in another Composition, then the + Projections to its `input_CIM ` and from its `output_CIM ` + to other Nodes in the Composition within which it is nested. input_CIM : `CompositionInterfaceMechanism` mediates input values for the `INPUT` `Nodes ` of the Composition. If the Composition is `nested `, then the input_CIM and its `InputPorts serve as proxies for the - Composition itself for its afferent `PathwayProjections `. + Composition itself for its afferent `PathwayProjections ` (see `input_CIM + ` for additional details). input_CIM_ports : dict a dictionary in which the key of each entry is the `InputPort` of an `INPUT` `Node ` in @@ -3065,7 +3325,8 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): parameter_CIM : `CompositionInterfaceMechanism` mediates modulatory values for all `Nodes ` of the Composition. If the Composition is `nested `, then the parameter_CIM and its `InputPorts ` serve as proxies for - the Composition itself for its afferent `ModulatoryProjections `. + the Composition itself for its afferent `ModulatoryProjections ` (see `parameter_CIM + ` for additional details). parameter_CIM_ports : dict a dictionary in which keys are `ParameterPorts ` of `Nodes ` in the @@ -3097,7 +3358,8 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): output_CIM : `CompositionInterfaceMechanism` aggregates output values from the OUTPUT nodes of the Composition. If the Composition is nested, then the - output_CIM and its OutputPorts serve as proxies for Composition itself in terms of efferent projections. + output_CIM and its OutputPorts serve as proxies for Composition itself in terms of efferent projections + (see `output_CIM ` for additional details). output_CIM_ports : dict a dictionary in which the key of each entry is the `OutputPort` of an `OUTPUT` `Node ` in @@ -3116,33 +3378,41 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): cims : list a list containing references to the Composition's `input_CIM `, - `parameter_CIM `, and `output_CIM `. + `parameter_CIM `, and `output_CIM ` + (see `Composition_CIMs` for additional details). env : Gym Forager Environment : default: None stores a Gym Forager Environment so that the Composition may interact with this environment within a single call to `run `. shadows : dict - a dictionary in which the keys are all in the Composition and the values are lists of any Nodes that - `shadow ` the original Node's input. + a dictionary in which the keys are all `Nodes ` in the Composition, + and the value of each is a list of any Nodes that `shadow ` its input.
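A minimal, hypothetical sketch of input shadowing (the names are illustrative assumptions; here, shadowing is specified by using another Mechanism's `InputPort` in the **input_ports** argument)::

    >>> import psyneulink as pnl
    >>> a = pnl.ProcessingMechanism(name='a')
    >>> b = pnl.ProcessingMechanism(name='b', input_ports=[a.input_port])   # b shadows a's input
    >>> comp = pnl.Composition(nodes=[a, b])
    >>> comp.run(inputs={a: [[1.5]]})   # b receives the same input as a on each TRIAL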
controller : OptimizationControlMechanism identifies the `OptimizationControlMechanism` used as the Composition's controller (see `Composition_Controller` for details). enable_controller : bool - determines whether the Composition's `controller ` is executed in each trial - (see controller_mode ` for timing of execution). Set to True by default - if `controller ` is specified. Setting it to False suppresses exectuion of the - `controller `. + determines whether the Composition's `controller ` is executed when the Composition + is run. Set to True by default if `controller ` is specified. Setting it to False + suppresses execution of the `controller ` (see `Composition_Controller_Execution` + for additional details, including timing of execution). controller_mode : BEFORE or AFTER - determines whether the controller is executed before or after the rest of the `Composition` - is executed on each trial. + determines whether the `controller ` is executed before or after the rest of the + `Composition` when it is run, at the `TimeScale` determined by `controller_time_scale + ` (see `Composition_Controller_Execution` for additional details). + + controller_time_scale: TimeScale[TIME_STEP, PASS, TRIAL, RUN] : default TRIAL + determines the frequency at which the `controller ` is executed, either before or + after the Composition as determined by `controller_mode ` (see + `Composition_Controller_Execution` for additional details). controller_condition : Condition - specifies whether the controller is executed in a given trial. The default is `Always`, which - executes the controller on every trial. + determines whether the `controller ` is executed in a given trial. The + default is `Always()`, which executes the controller on every trial (see `Composition_Controller_Execution` + for additional details). default_execution_id if no *context* is specified in a call to run, this *context* is used; by default, @@ -3266,6 +3536,8 @@ def __init__( pathways=None, nodes=None, projections=None, + allow_probes:Union[bool, CONTROL]=True, + include_probes_in_output:bool=False, disable_learning:bool=False, controller:ControlMechanism=None, enable_controller=None, @@ -3292,6 +3564,8 @@ def __init__( self._graph_processing = None self.nodes = ContentAddressableList(component_type=Component) self.node_ordering = [] + self.allow_probes = allow_probes + self.include_probes_in_output=include_probes_in_output self.required_node_roles = [] self.excluded_node_roles = [] from psyneulink.core.compositions.pathway import Pathway @@ -3316,8 +3590,6 @@ def __init__( port_map=self.output_CIM_ports) self.cims = [self.input_CIM, self.parameter_CIM, self.output_CIM] - self.shadows = {} - self.default_execution_id = self.name self.execution_ids = {self.default_execution_id} @@ -3328,11 +3600,12 @@ def __init__( self.disable_learning = disable_learning - # status attributes + # graph and scheduler status attributes self.graph_consistent = True # Tracks if Composition is in runnable state (no dangling projections (what else?)
self.needs_update_graph = True # Tracks if Composition graph has been analyzed to assign roles to components self.needs_update_graph_processing = True # Tracks if the processing graph is current with the full graph - self.needs_update_scheduler = True # Tracks i4f the scheduler needs to be regenerated + self.needs_update_scheduler = True # Tracks if the scheduler needs to be regenerated + self.needs_update_controller = True # Tracks if controller needs to update its state_input_ports self.nodes_to_roles = collections.OrderedDict() self.cycle_vertices = set() @@ -3354,6 +3627,25 @@ def __init__( self.log = CompositionLog(owner=self) self._terminal_backprop_sequences = {} + self.controller = None + + # FIX 4/8/20 [JDC]: WHY NOT CALL add_nodes()? + # Nodes, Projections, and Pathways + if nodes is not None: + nodes = convert_to_list(nodes) + for node in nodes: + required_roles = None + if isinstance(node, tuple): + node, required_roles = node + self.add_node(node, required_roles) + + # FIX 4/8/20 [JDC]: TEST THIS + if projections is not None: + projections = convert_to_list(projections) + self.add_projections(projections) + + self.add_pathways(pathways, context=context) + # Controller self.controller = None self._controller_initialization_status = ContextFlags.INITIALIZED @@ -3365,6 +3657,9 @@ def __init__( self.controller_time_scale = controller_time_scale self.controller_condition = controller_condition self.controller_condition.owner = self.controller + # This is set at runtime and may be used by the controller to assign its + # `num_trials_per_estimate ` attribute. + self.num_trials = None self._update_parameter_components() @@ -3376,20 +3671,6 @@ def __init__( # the behavior of components del self.parameters.value - # FIX 4/8/20 [JDC]: WHY NOT CALL add_nodes()? - # Nodes, Projections, and Pathways - if nodes is not None: - nodes = convert_to_list(nodes) - for node in nodes: - self.add_node(node) - - # FIX 4/8/20 [JDC]: TEST THIS - if projections is not None: - projections = convert_to_list(projections) - self.add_projections(projections) - - self.add_pathways(pathways, context=context) - # Call with context = COMPOSITION to avoid calling _check_initialization_status again self._analyze_graph(context=context) @@ -3454,7 +3735,7 @@ def scheduling_mode(self, scheduling_mode: SchedulingMode): self.scheduler.scheduling_mode = scheduling_mode # ****************************************************************************************************************** - # GRAPH + # region -------------------------------------- GRAPH ------------------------------------------------------------- # ****************************************************************************************************************** @handle_external_context(source=ContextFlags.COMPOSITION) @@ -3473,9 +3754,10 @@ def _analyze_graph(self, context=None): nodes are not set to `OUTPUT ` by default. """ - # Instantiate any deferred init components - self._check_projection_initialization_status(context=context) + self._check_controller_initialization_status(context=context) + self._check_nodes_initialization_status(context=context) + # FIX: SHOULDN'T THIS TEST MORE EXPLICITLY IF NODE IS A Composition? 
# Call _analzye_graph() for any nested Compositions for n in self.nodes: try: @@ -3484,10 +3766,14 @@ def _analyze_graph(self, context=None): pass self._complete_init_of_partially_initialized_nodes(context=context) + # Call before _determine_pathway and _create_CIM_ports so they have updated roles self._determine_node_roles(context=context) self._determine_pathway_roles(context=context) self._create_CIM_ports(context=context) + # Call after above so shadow_projections have relevant organization self._update_shadow_projections(context=context) + # Call again to accomodate any changes from _update_shadow_projections + self._determine_node_roles(context=context) self._check_for_projection_assignments(context=context) self.needs_update_graph = False @@ -3516,16 +3802,16 @@ def remove_vertex(vertex): self._graph_processing.prune_feedback_edges() self.needs_update_graph_processing = False + # endregion GRAPH # ****************************************************************************************************************** - # NODES + # region ---------------------------------------NODES ------------------------------------------------------------- # ****************************************************************************************************************** - @handle_external_context(source = ContextFlags.COMPOSITION) def add_node(self, node, required_roles=None, context=None): """ - Add a Composition Node (`Mechanism ` or `Composition`) to Composition, if it is not already added + Add a Node (`Mechanism ` or `Composition`) to Composition, if it is not already added Arguments --------- @@ -3538,24 +3824,23 @@ def add_node(self, node, required_roles=None, context=None): """ # FIX 5/25/20 [JDC]: ADD ERROR STRING (as in pathway_arg_str in add_linear_processing_pathway) + # Raise error if Composition is added to itself if node is self: pathway_arg_str = "" if context.source in {ContextFlags.INITIALIZING, ContextFlags.METHOD}: pathway_arg_str = " in " + context.string raise CompositionError(f"Attempt to add Composition as a Node to itself{pathway_arg_str}.") - self._update_shadows_dict(node) + if isinstance(node, Composition): + # IMPLEMENTATION NOTE: include_probes_in_output=False is not currently supported for nested Nodes + # (they require get_output_value() to return value of all output_ports of output_CIM) + node.include_probes_in_output = True try: node._analyze_graph(context = context) except AttributeError: pass - # # MODIFIED 6/13/20 NEW: - # if any(n is node for nested_comp in self.nodes if isinstance(nested_comp, Composition) for n in nested_comp.nodes): - # return - # MODIFIED 6/13/20 END - node._check_for_composition(context=context) # Add node to Composition's graph @@ -3570,6 +3855,7 @@ def add_node(self, node, required_roles=None, context=None): self.needs_update_graph = True self.needs_update_graph_processing = True self.needs_update_scheduler = True + self.needs_update_controller = True invalid_aux_components = self._add_node_aux_components(node) @@ -3580,16 +3866,6 @@ def add_node(self, node, required_roles=None, context=None): for required_role in required_roles: self._add_required_node_role(node, required_role, context) - # Add projections to node from sender of any shadowed InputPorts - for input_port in node.input_ports: - if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: - for proj in input_port.shadow_inputs.path_afferents: - sender = proj.sender - if sender.owner != self.input_CIM: - 
self.add_projection(projection=MappingProjection(sender=proj.sender, receiver=input_port), - sender=proj.sender.owner, - receiver=node) - # Add ControlSignals to controller and ControlProjections # to any parameter_ports specified for control in node's constructor if self.controller: @@ -3601,35 +3877,8 @@ def add_node(self, node, required_roles=None, context=None): except NameError: pass - def _instantiate_deferred_init_control(self, node, context=None): - """ - If node is a Composition with a controller, activate its nodes' deferred init control specs for its controller. - If it does not have a controller, but self does, activate them for self's controller. - - If node is a Node that has deferred init control specs and self has a controller, activate the deferred init - control specs for self's controller. - - Returns - ------- - - list of hanging control specs that were not able to be assigned for a controller at any level. - - """ - hanging_control_specs = [] - if node.componentCategory == 'Composition': - for nested_node in node.nodes: - hanging_control_specs.extend(node._instantiate_deferred_init_control(nested_node, context=context)) - else: - hanging_control_specs = node._get_parameter_port_deferred_init_control_specs() - if not self.controller: - return hanging_control_specs - else: - for spec in hanging_control_specs: - control_signal = self.controller._instantiate_control_signal(control_signal=spec, - context=context) - self.controller.control.append(control_signal) - self.controller._activate_projections_for_compositions(self) - return [] + if isinstance(node, ControlMechanism): + self._handle_allow_probes_for_control(node) def add_nodes(self, nodes, required_roles=None, context=None): """ @@ -3651,8 +3900,6 @@ def add_nodes(self, nodes, required_roles=None, context=None): """ if not isinstance(nodes, list): - # raise CompositionError(f"Arg for 'add_nodes' method of '{self.name}' {Composition.__name__} " - # f"must be a list of nodes or (node, required_roles) tuples") nodes = convert_to_list(nodes) for node in nodes: if isinstance(node, (Mechanism, Composition)): @@ -3859,19 +4106,37 @@ def get_nodes_by_role(self, role): except KeyError as e: raise CompositionError('Node missing from {0}.nodes_to_roles: {1}'.format(self, e)) + def _get_nested_nodes_with_same_roles_at_all_levels(self, comp, include_roles, exclude_roles=None): + """Return all Nodes from nested Compositions that have *include_roles* but not *exclude_roles at all levels*. + Note: need to do this recursively, checking roles on the "way down," since a Node may have a role in a + deeply nested Composition, but that Composition itself may not have the same role in the Composition + within which *it* is nested (e.g., a Node might be an INPUT Node of a nested Composition, but that + nested Composition may not be an INPUT Node of the Composition in which it is nested). 
+ """ + nested_nodes = [] + include_roles = convert_to_list(include_roles) + if exclude_roles: + exclude_roles = convert_to_list(exclude_roles) + else: + exclude_roles = [] + if isinstance(comp, Composition): + # Get all nested nodes in comp that have include_roles and not exclude_roles: + for node in [n for n in comp.nodes + if (any(n in comp.get_nodes_by_role(include) + for include in include_roles) + and not any(n in comp.get_nodes_by_role(exclude) + for exclude in exclude_roles))]: + if isinstance(node, Composition): + nested_nodes.extend(node._get_nested_nodes_with_same_roles_at_all_levels(node, include_roles, + exclude_roles)) + else: + nested_nodes.append(node) + return nested_nodes or None + def _get_input_nodes_by_CIM_input_order(self): """Return a list with the `INPUT` `Nodes ` of the Composition in the same order as their corresponding InputPorts on Composition's `input_CIM `. """ - # input_nodes = [] - # for i, port in enumerate(self.input_CIM.input_ports): - # output_port = next((o for o in self.input_CIM.output_ports - # if o.function.corresponding_input_port.position_in_mechanism == i), None) - # assert output_port - # node = next((p.receiver.owner for p in output_port.efferents if not SHADOW_INPUT_NAME in p.name), None) - # assert node - # input_nodes.append(node) - # return input_nodes return [{cim[0]:n for n, cim in self.input_CIM_ports.items()}[input_port].owner for input_port in self.input_CIM.input_ports] @@ -3880,9 +4145,8 @@ def _get_nested_nodes(self, nested_nodes=NotImplemented, root_composition=NotImplemented, visited_compositions=NotImplemented): - """Recursive search that returns all nodes of all nested compositions in a tuple with the composition they are - embedded in. - + """Recursively search and return all nodes of all nested Compositions + in a tuple with Composition in which they are nested. :return A list of tuples in format (node, composition) containing all nodes of all nested compositions. @@ -3904,10 +4168,21 @@ def _get_nested_nodes(self, nested_nodes.append((node,self)) return nested_nodes + def _handle_allow_probes_for_control(self, node): + """Reconcile allow_probes for Composition and any ControlMechanisms assigned to it, including controller. + """ + assert isinstance(node, ControlMechanism), \ + f"PROGRAM ERROR: Attempt to handle 'allow_probes' arg for non-ControlMechanism." + # If ControlMechanism has specified allow_probes, assign at least CONTROL to Composition.allow_probes + if not self.allow_probes and node.allow_probes: + self.allow_probes = CONTROL + # If allow_probes is specified on Composition as CONTROL, then turn it on for ControlMechanism + node.allow_probes = node.allow_probes or self.allow_probes is CONTROL + def _get_nested_compositions(self, nested_compositions=NotImplemented, visited_compositions=NotImplemented): - """Recursive search that returns all nested compositions. + """Recursive search for and return all nested compositions. 
:return @@ -3927,6 +4202,12 @@ def _get_nested_compositions(self, visited_compositions) return nested_compositions + def _get_all_nodes(self): + """Return all nodes, including those within nested Compositions at any level + Note: this is distinct from the _all_nodes property, which returns all nodes at the top level + """ + return [k[0] for k in self._get_nested_nodes()] + list(self.nodes) + def _determine_origin_and_terminal_nodes_from_consideration_queue(self): """Assigns NodeRole.ORIGIN to all nodes in the first entry of the consideration queue and NodeRole.TERMINAL to all nodes in the last entry of the consideration queue. The ObjectiveMechanism of a Composition's @@ -3955,28 +4236,29 @@ def _determine_origin_and_terminal_nodes_from_consideration_queue(self): self._add_node_role(node, NodeRole.TERMINAL) def _add_node_aux_components(self, node, context=None): - """ + """Add aux_components of node to Composition. + Returns ------- - list containing references to all invalid aux components - """ - # Implement any components specified in node's aux_components attribute + invalid_aux_components = [] if hasattr(node, "aux_components"): - # Collect the node's aux components that are not currently able to be added to the Composition - # we'll ignore these for now and try to activate them again during every call to _analyze_graph - # at runtime if there are still any invalid aux components left, we will issue a warning + # Collect the node's aux components that are not currently able to be added to the Composition; + # ignore these for now and try to activate them again during every call to _analyze_graph + # and, at runtime, if there are still any invalid aux_components left, issue a warning projections = [] # Add all "nodes" to the composition first (in case projections reference them) - for component in node.aux_components: + for i, component in enumerate(node.aux_components): if isinstance(component, (Mechanism, Composition)): if isinstance(component, Composition): component._analyze_graph() self.add_node(component) elif isinstance(component, Projection): - projections.append((component, False)) + proj_tuple = (component, False) + projections.append(proj_tuple) + node.aux_components[i] = proj_tuple elif isinstance(component, tuple): if isinstance(component[0], Projection): if (isinstance(component[1], bool) or component[1] in {EdgeType.FLEXIBLE, MAYBE}): @@ -4013,6 +4295,7 @@ def _add_node_aux_components(self, node, context=None): .format(component.name, node.name)) invalid_aux_components.extend(self._get_invalid_aux_components(node)) + # Add all Projections to the Composition for proj_spec in [i for i in projections if not i[0] in invalid_aux_components]: # The proj_spec assumes a direct connection between sender and receiver, and is therefore invalid if @@ -4034,12 +4317,77 @@ def _add_node_aux_components(self, node, context=None): self.add_projection(sender=proj_spec[0].sender, receiver=proj_spec[0].receiver, feedback=proj_spec[1]) + del node.aux_components[node.aux_components.index(proj_spec)] + return invalid_aux_components + def _get_invalid_aux_components(self, node): + """ + Return any Components in aux_components for a node that references items not (yet) in this Composition + """ + # FIX 11/20/21: THIS APPEARS TO ONLY HANDLE PROJECTIONS AND NOT COMPOSITIONS OR MECHANISMS + # (OTHER THAN THE COMPOSITION'S controller AND ITS objective_mechanism) + + # First get all valid nodes: + # - nodes in Composition + # - nodes in any nested Compositions + # - controller and associated 
objective_mechanism + valid_nodes = [node for node in self.nodes.data] + \ + [node for node, composition in self._get_nested_nodes()] + \ + [self] + if self.controller: + valid_nodes.append(self.controller) + if hasattr(self.controller,'objective_mechanism'): + valid_nodes.append(self.controller.objective_mechanism) + + # Then get invalid components: + # - Projections that have senders or receivers not in the Composition + # (this includes any in aux_components of node, or associated with any Mechanism listed in aux_components) + invalid_components = [] + for aux in node.aux_components: + component = None + if isinstance(aux, Projection): + component = aux + elif hasattr(aux, '__iter__'): + for i in aux: + if isinstance(i, Projection): + component = i + elif isinstance(i, Mechanism): + if self._get_invalid_aux_components(i): + invalid_components.append(i) + elif isinstance(aux, Mechanism): + if self._get_invalid_aux_components(aux): + invalid_components.append(aux) + if not component: + continue + if isinstance(component, Projection): + if hasattr(component.sender, OWNER_MECH): + sender_node = component.sender.owner_mech + else: + if isinstance(component.sender.owner, CompositionInterfaceMechanism): + sender_node = component.sender.owner.composition + else: + sender_node = component.sender.owner + if hasattr(component.receiver, OWNER_MECH): + receiver_node = component.receiver.owner_mech + else: + if isinstance(component.receiver.owner, CompositionInterfaceMechanism): + receiver_node = component.receiver.owner.composition + else: + receiver_node = component.receiver.owner + # Defer instantiation of all shadow Projections until call to _update_shadow_projections() + if (not all([sender_node in valid_nodes, receiver_node in valid_nodes]) + or (hasattr(component.receiver, SHADOW_INPUTS) and component.receiver.shadow_inputs)): + invalid_components.append(component) + if invalid_components: + return invalid_components + else: + return [] + def _complete_init_of_partially_initialized_nodes(self, context=None): """ - Attempt to complete initialization of aux components for any nodes with - aux components that were not previously compatible with Composition + Attempt to complete initialization of aux_components for any nodes with + aux_components that were not previously compatible with Composition """ completed_nodes = [] for node in self._partially_added_nodes: @@ -4048,6 +4396,20 @@ def _complete_init_of_partially_initialized_nodes(self, context=None): completed_nodes.append(node) self._partially_added_nodes = list(set(self._partially_added_nodes) - set(completed_nodes)) + # Don't instantiate unless flagged for updating (if nodes have been added to the graph); + # this avoids unnecessary calls on repeated calls to run(). 
+ if (self.controller + and self.needs_update_controller + and context.flags & (ContextFlags.COMPOSITION | ContextFlags.COMMAND_LINE)): + if hasattr(self.controller, 'state_input_ports'): + self.controller._update_state_input_ports_for_controller(context=context) + # self._instantiate_controller_shadow_projections(context=context) + self.controller._validate_monitor_for_control(self._get_all_nodes()) + self._instantiate_control_projections(context=context) + # FIX: 11/15/21 - CAN'T SET TO FALSE HERE, AS THIS IS CALLED BY _analyze_graph() FROM add_node() + # BEFORE PROJECTIONS TO THE NODE HAS BEEN ADDED (AFTER CALL TO add_node()) + self.needs_update_controller = False + def _determine_node_roles(self, context=None): """Assign NodeRoles to Nodes in Composition @@ -4188,44 +4550,8 @@ def _determine_node_roles(self, context=None): NodeRole.FEEDBACK_RECEIVER ) - # region - # # MODIFIED 4/25/20 OLD NOTES: - # # If no OUTPUT nodes were explicitly specified as required_roles by *user* , assign them: - # # - if there are LearningMechanisms, OUTPUT node is the last non-learning-related node. - # # - if there are no TERMINAL nodes either, then the last node added to the Composition becomes the OUTPUT node. - # # - ignore OUTPUT nodes in learning pathways as those are assigned automatically in add_linear_learning_pathway - # # and don't want that to suppress normal assignment of TERMINAL nodes in non-learning pathways as OUTPUT nodes - # # (if user has not specified any as required roles) - # # # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are: - # # # - not used for Learning; - # # # - not ControlMechanisms or ObjectiveMechanisms that project to them; - # # # - do not project to any other nodes. - # # - # # # First, find last `consideration_set ` in scheduler that does not contain only - # # # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s); - # # # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler - # # # Next, remove any learning-related nodes, ControlMechanism(s) or control-related - # # # ObjectiveMechanism(s) that may have "snuck in" (i.e., happen to be in the set) - # # # Then, add any nodes that are not learning-related or a ControlMechanism, - # # # and that have *no* efferent Projections - # # # IMPLEMENTATION NOTE: - # # # Do this here, as the list considers entire sets in the consideration queue, - # # # and a node with no efferents may be in the same set as one with efferents - # # # if they have the same dependencies. - # # Assign TERMINAL role to nodes that are last in the scheduler's consideration queue that are: - # # - not used for Learning; - # # - not ControlMechanisms or ObjectiveMechanisms that project to them; - # # - do not project to any other nodes. 
- # # FIX 4/25/20 [JDC]: MISSES ObjectiveMechanism BURIED IN LAST CONSIDERATION SET - # # First, find last `consideration_set ` in scheduler that does not contain only - # # learning-related nodes, ControlMechanism(s) or control-related ObjectiveMechanism(s); - # # note: get copy of the consideration_set, as don't want to modify one actually used by scheduler - # # MODIFIED 4/25/20 END - # endregion - - # MODIFIED 4/25/20 NEW: # FIX 4/25/20 [JDC]: NEED TO AVOID AUTOMATICALLY (RE-)ASSIGNING ONES REMOVED BY exclude_node_roles - # - Simply execlude any LEARNING_OBJECTIVE and CONTROL_OBJECTIVE that project only to ModulatoryMechanism + # - Simply exclude any LEARNING_OBJECTIVE and CONTROL_OBJECTIVE that project only to ModulatoryMechanism # - NOTE IN PROGRAM ERROR FAILURE TO ASSIGN CONTROL_OBJECTIVE # OUTPUT @@ -4261,7 +4587,7 @@ def _determine_node_roles(self, context=None): # self._add_node_role(node, NodeRole.OUTPUT) # continue - # Assign OUTPUT if it is an `RecurrentTransferMechanism` configured for learning + # Assign OUTPUT if it is a `RecurrentTransferMechanism` configured for learning # and doesn't project to any Nodes other than its `AutoassociativeLearningMechanism` # (this is not picked up as a `TERMINAL` since it projects to the `AutoassociativeLearningMechanism`) # but can (or already does) project to an output_CIM @@ -4291,7 +4617,20 @@ def _determine_node_roles(self, context=None): or (isinstance(p.receiver.owner, ControlMechanism) and not isinstance(node, ObjectiveMechanism))) for p in node.efferents): self._add_node_role(node, NodeRole.OUTPUT) - # MODIFIED 4/25/20 END + + # If node is a Composition and its output_CIM has OutputPorts that either have no Projections + # or projections to self.output_CIM, then assign as OUTPUT Node + # Note: this ensures that if a nested Comp has both Nodes that project to ones in an outer Composition + # *and* legit OUTPUT Nodes, the latter qualify to make the nested Comp an OUTPUT Node + if isinstance(node, Composition): + # for port in node.output_CIM.output_ports: + # if (not port.efferents + # or any(proj.receiver.owner is self.output_CIM for proj in port.efferents)): + # self._add_node_role(node, NodeRole.OUTPUT) + # break + if any(not port.efferents or any(proj.receiver.owner is self.output_CIM for proj in port.efferents) + for port in node.output_CIM.output_ports): + self._add_node_role(node, NodeRole.OUTPUT) # Assign SINGLETON and INTERNAL nodes for node in self.nodes: @@ -4393,8 +4732,8 @@ def _create_CIM_ports(self, context=None): OutputPort of each OUTPUT node. Connect the OUTPUT node's OutputPort to the output_CIM's corresponding InputPort via a standard MappingProjection. - - create a corresponding InputPort and ControlSignal on the `parameter_CIM ` for each - InputPort of each node in the Composition that receives a modulatory projection from an enclosing + - create a corresponding InputPort and ControlSignal on the `parameter_CIM ` for + each InputPort of each node in the Composition that receives a modulatory projection from an enclosing Composition. Connect the original ControlSignal to the parameter_CIM's corresponding InputPort via a standard MappingProjection, then activate the projections that are created automatically during instantiation of the ControlSignals to carry that signal to the target ParameterPort. 
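As a brief, hypothetical sketch of the correspondences these CIM port maps maintain (the names are illustrative assumptions)::

    >>> import psyneulink as pnl
    >>> m = pnl.ProcessingMechanism(name='m')
    >>> inner = pnl.Composition(nodes=[m], name='inner')
    >>> outer = pnl.Composition(nodes=[inner], name='outer')
    >>> inner.input_CIM_ports[m.input_port]      # ports on inner.input_CIM corresponding to m's InputPort
    >>> inner.output_CIM_ports[m.output_port]    # ports on inner.output_CIM corresponding to m's OutputPort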
@@ -4457,10 +4796,8 @@ def _create_CIM_ports(self, context=None): name= INPUT_CIM_NAME + "_" + node.name + "_" + input_port.name, context=context) - # MODIFIED 6/13/20 NEW: if NodeRole.TARGET in self.get_roles_by_node(node): interface_input_port.parameters.require_projection_in_composition.set(False, override=True) - # MODIFIED 6/13/20 END # add port to the input CIM self.input_CIM.add_ports([interface_input_port], @@ -4515,8 +4852,8 @@ def _create_CIM_ports(self, context=None): # Set up ports on the output CIM for all output nodes in the Composition current_output_node_output_ports = set() - # loop through all output ports on output nodes - for node in self.get_nodes_by_role(NodeRole.OUTPUT): + # loop through all output ports on OUTPUT and PROBE nodes + for node in self.get_nodes_by_role(NodeRole.OUTPUT) + self.get_nodes_by_role(NodeRole.PROBE): for output_port in node.output_ports: current_output_node_output_ports.add(output_port) @@ -4537,7 +4874,8 @@ def _create_CIM_ports(self, context=None): # instantiate the output port on the output CIM to correspond to the node's output port interface_output_port = OutputPort( owner=self.output_CIM, - variable=(OWNER_VALUE, functools.partial(self.output_CIM.get_input_port_position, interface_input_port)), + variable=(OWNER_VALUE, functools.partial(self.output_CIM.get_input_port_position, + interface_input_port)), function=Identity, reference_value=output_port.defaults.value, name=OUTPUT_CIM_NAME + "_" + node.name + "_" + output_port.name, @@ -4584,6 +4922,7 @@ def _create_CIM_ports(self, context=None): del self.output_CIM_ports[output_port] # PARAMETER CIM + # We get the projection that needs to be routed through the PCIM as well as the composition that owns it, # because we will need to activate the new projections for the composition that owns the PCIM as well as the # referring composition @@ -4640,7 +4979,7 @@ def _create_CIM_ports(self, context=None): # Get node port mappings for cim node_port_to_cim_port_tuples_mapping = cim.port_map # Create lists of tuples of (cim_input_port, cim_output_port, index), in which indices are for - # nodes within self.nodes (cim_node_indices) and ports wihin nodes (cim_port_within_node_indices + # nodes within self.nodes (cim_node_indices) and ports within nodes (cim_port_within_node_indices cim_node_indices = [] cim_port_within_node_indices = [] for node_port, cim_ports in node_port_to_cim_port_tuples_mapping.items(): @@ -4718,7 +5057,8 @@ def _create_CIM_ports(self, context=None): # f"({p} does not match the number of its OutputPorts ({n})." elif type==OUTPUT: n = len(cim.input_ports) - len(cim.user_added_ports[INPUT_PORTS]) - o = sum([len(n.output_ports) for n in self.get_nodes_by_role(NodeRole.OUTPUT)]) + o = sum([len(n.output_ports) + for n in self.get_nodes_by_role(NodeRole.PROBE) + self.get_nodes_by_role(NodeRole.OUTPUT)]) assert n == o, f"PROGRAM ERROR: Number of InputPorts on {self.output_CIM.name} ({n}) does not " \ f"match the number of OutputPorts over all OUTPUT nodes of {self.name} ({o})." 
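# --------------------------------------------------------------------------------
# Illustrative aside (not part of _create_CIM_ports): a rough usage sketch of the
# PROBE routing supported here. Names are hypothetical, and this assumes that
# allow_probes is accepted as a constructor argument of Composition (per the
# self.allow_probes check in _get_nested_node_CIM_port below).
#
#     from psyneulink import Composition, ControlMechanism, ProcessingMechanism, NodeRole
#     A = ProcessingMechanism(name='A')
#     B = ProcessingMechanism(name='B')   # INTERNAL Node of the nested Composition
#     C = ProcessingMechanism(name='C')
#     inner = Composition(pathways=[[A, B, C]], name='inner')
#     outer = Composition(name='outer', allow_probes=True)
#     outer.add_node(inner)
#     # Monitoring the INTERNAL Node B from the outer Composition assigns it
#     # NodeRole.PROBE, so it is routed through inner's output_CIM (and counted
#     # in the assert above) alongside inner's OUTPUT Node C.
#     outer.add_controller(ControlMechanism(monitor_for_control=[B]))
#     print(inner.get_nodes_by_role(NodeRole.PROBE))   # expected to include B
# --------------------------------------------------------------------------------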
# p = len([p for p in self.projections if OUTPUT_CIM_NAME in p.name]) @@ -4735,36 +5075,76 @@ def _create_CIM_ports(self, context=None): def _get_nested_node_CIM_port(self, node: Mechanism, node_port: tc.any(InputPort, OutputPort), - role: tc.enum(NodeRole.INPUT, NodeRole.OUTPUT) + role: tc.enum(NodeRole.INPUT, NodeRole.PROBE, NodeRole.OUTPUT) ): """Check for node in nested Composition - Return relevant port of relevant CIM if found and nested Composition in which it was found, else (None, None) + Assign NodeRole.PROBE to relevant nodes if allow_probes is specified (see handle_probes below) + Return relevant port of relevant CIM if found and nested Composition in which it was found; else None's """ + def try_assigning_as_probe(node, role, comp): + """Try to assign node as PROBE + If: + - node is an INPUT or INTERNAL node in its Composition + - outermost Composition has controller + - allow_probes is set for it or its objective_mechanism + Then: + - add PROBE as one of its roles + - call _analyze_graph() to create output_CIMs ports and projections for it + - return True + Else: + - return False + """ + err_msg = f"{node.name} found in nested {Composition.__name__} of {self.name} " \ + f"({nc.name}) but without required {role}." + + # Get any Nodes monitored by ControlMechanisms for which allow_probes is specified + ctl_monitored_nodes = {} + if any(isinstance(n, ControlMechanism) and n.allow_probes for n in self._all_nodes): + ctl_monitored_nodes = self._get_monitor_for_control_nodes() + + # If allow_probes is set on the Composition or any ControlMechanisms, then attempt to assign node as PROBE + if self.allow_probes is True or ctl_monitored_nodes: + # Check if Node is an INPUT or INTERNAL + if any(role for role in comp.nodes_to_roles[node] if role in {NodeRole.INPUT, NodeRole.INTERNAL}): + comp._add_required_node_role(node, NodeRole.PROBE) + self._analyze_graph() + return + + # Failed to assign node as PROBE, so get ControlMechanisms that may be trying to monitor it + ctl_monitored_nodes = self._get_monitor_for_control_nodes() + if node in ctl_monitored_nodes: + if ctl_monitored_nodes[node].objective_mechanism: + # Node was specified for monitoring by an ObjectiveMechanism of a ControlMechanism + raise CompositionError(err_msg + f" Try setting '{ALLOW_PROBES}' argument of ObjectiveMechanism " + f"for {ctl_monitored_nodes[node].name} to 'True'.") + # Node was specified for monitoring by ControlMechanism + raise CompositionError(err_msg + f" Try setting '{ALLOW_PROBES}' argument " + f"of {ctl_monitored_nodes[node].name} to 'True'.") + # Node was not specified for monitoring by a ControlMechanism + raise CompositionError(err_msg) + nested_comp = CIM_port_for_nested_node = CIM = None nested_comps = [i for i in self.nodes if isinstance(i, Composition)] for nc in nested_comps: nested_nodes = dict(nc._get_nested_nodes()) if node in nested_nodes or node in nc.nodes.data: - # Must be assigned Node.Role of INPUT or OUTPUT (depending on receiver vs sender) + owning_composition = nc if node in nc.nodes else nested_nodes[node] + # Must be assigned Node.Role of INPUT, PROBE, or OUTPUT (depending on receiver vs sender) # This validation does not apply to ParameterPorts. Externally modulated nodes - # can be in any position within a Composition. 
They don't need to be INPUT or OUTPUT nodes - if not isinstance(node_port, ParameterPort): - owning_composition = nc if node in nc.nodes.data else nested_nodes[node] - if role not in owning_composition.nodes_to_roles[node]: - raise CompositionError("{} found in nested {} of {} ({}) but without required {} ({})". - format(node.name, Composition.__name__, self.name, nc.name, - NodeRole.__name__, repr(role))) + # can be in any position within a Composition. They don't need to be INPUT or OUTPUT nodes. + if not isinstance(node_port, ParameterPort) and role not in owning_composition.nodes_to_roles[node]: + try_assigning_as_probe(node, role, owning_composition) # With the current implementation, there should never be multiple nested compositions that contain the # same mechanism -- because all nested compositions are passed the same execution ID + # FIX: 11/15/21: ??WHY IS THIS COMMENTED OUT: # if CIM_port_for_nested_node: # warnings.warn("{} found with {} of {} in more than one nested {} of {}; " # "only first one found (in {}) will be used". # format(node.name, NodeRole.__name__, repr(role), # Composition.__name__, self.name, nested_comp.name)) # continue - if isinstance(node_port, InputPort): if node_port in nc.input_CIM_ports: CIM_port_for_nested_node = owning_composition.input_CIM_ports[node_port][0] @@ -4778,7 +5158,10 @@ def _get_nested_node_CIM_port(self, CIM_port_for_nested_node = owning_composition.output_CIM_ports[node_port][1] CIM = owning_composition.output_CIM else: - nested_node_CIM_port_spec = nc._get_nested_node_CIM_port(node, node_port, NodeRole.OUTPUT) + nested_node_CIM_port_spec = nc._get_nested_node_CIM_port(node, + node_port, + role) + # NodeRole.OUTPUT) CIM_port_for_nested_node = nc.output_CIM_ports[nested_node_CIM_port_spec[0]][1] CIM = nc.output_CIM elif isinstance(node_port, ParameterPort): @@ -4795,92 +5178,27 @@ def _get_nested_node_CIM_port(self, CIM = nc.parameter_CIM nested_comp = nc break + + # Return CIM_port_for_nested_node in both expected node and node_port slots return CIM_port_for_nested_node, CIM_port_for_nested_node, nested_comp, CIM - def _update_shadows_dict(self, node): - # Create an empty entry for this node in the Composition's "shadows" dict - # If any other nodes shadow this node, they will be added to the list - if node not in self.shadows: - self.shadows[node] = [] + # endregion NODES - nested_nodes = dict(self._get_nested_nodes()) - # If this node is shadowing another node, then add it to that node's entry in the Composition's "shadows" dict - # If the node it's shadowing is a nested node, add it to the entry for the composition it's nested in. 
- for input_port in node.input_ports: - if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: - owner = input_port.shadow_inputs.owner - if isinstance(owner, CompositionInterfaceMechanism): - owner = owner.composition - if owner in nested_nodes: - owner = nested_nodes[owner] - if node is self.controller and self._controller_initialization_status == ContextFlags.DEFERRED_INIT: - if owner not in self.nodes: - continue - if node not in self.shadows[owner]: - self.shadows[owner].append(node) + # ****************************************************************************************************************** + # region ----------------------------------- PROJECTIONS ----------------------------------------------------------- + # ****************************************************************************************************************** - def _route_control_projection_through_intermediary_pcims(self, projection, sender, sender_mechanism, receiver, graph_receiver, context): + def add_projections(self, projections=None): """ - Takes as input a specification for a projection to a parameter port that is nested n-levels below its sender, - instantiates and activates ports and projections on intermediary pcims, and returns a new - projection specification from the original sender to the relevant input port of the pcim of the Composition - located in the same level of nesting. - """ - for proj in receiver.mod_afferents: - if proj.sender.owner == sender_mechanism: - receiver._remove_projection_to_port(proj) - for proj in sender.efferents: - if proj.receiver == receiver: - sender._remove_projection_from_port(proj) - modulation = sender.modulation - interface_input_port = InputPort(owner=graph_receiver.parameter_CIM, - variable=receiver.defaults.value, - reference_value=receiver.defaults.value, - name=PARAMETER_CIM_NAME + "_" + receiver.owner.name + "_" + receiver.name, - context=context) - graph_receiver.parameter_CIM.add_ports([interface_input_port], context=context) - # control signal for parameter CIM that will project directly to inner Composition's parameter - control_signal = ControlSignal( - modulation=modulation, - variable=(OWNER_VALUE, functools.partial(graph_receiver.parameter_CIM.get_input_port_position, interface_input_port)), - transfer_function=Identity, - modulates=receiver, - name=PARAMETER_CIM_NAME + "_" + receiver.owner.name + "_" + receiver.name, - ) - if receiver.owner not in graph_receiver.nodes.data + graph_receiver.cims: - receiver = interface_input_port - graph_receiver.parameter_CIM.add_ports([control_signal], context=context) - # add sender and receiver to self.parameter_CIM_ports dict - for p in control_signal.projections: - # self.add_projection(p) - graph_receiver.add_projection(p, receiver=p.receiver, sender=control_signal) - try: - sender._remove_projection_to_port(projection) - except ValueError: - pass - try: - receiver._remove_projection_from_port(projection) - except ValueError: - pass - receiver = interface_input_port - return MappingProjection(sender=sender, receiver=receiver) - - - # ****************************************************************************************************************** - # PROJECTIONS - # ****************************************************************************************************************** - - def add_projections(self, projections=None): - """ - Calls `add_projection ` for each Projection in the *projections* list. Each - Projection must have its `sender ` and `receiver ` - already specified. 
If an item in the list is a list of projections, called recursively on that list. - - Arguments - --------- - - projections : list of Projections - list of Projections to be added to the Composition + Calls `add_projection ` for each Projection in the *projections* list. Each + Projection must have its `sender ` and `receiver ` + already specified. If an item in the list is a list of projections, called recursively on that list. + + Arguments + --------- + + projections : list of Projections + list of Projections to be added to the Composition """ if isinstance(projections, list): @@ -4925,11 +5243,11 @@ def add_projection(self, - if it is in the Composition: - if there is only one, the request is ignored and the existing Projection is returned - if there is more than one, an exception is raised as this should never be the case - - it is NOT in the Composition: + - if it is NOT in the Composition: - if there is only one, that Projection is used; - if there is more than one, the last in the list (presumably the most recent) is used; in either case, processing continues, to activate it for the Composition, - construct any "shadow" projections that may be specified, and assign feedback if specified, + construct any "shadow" projections that may be specified, and assign feedback if specified. • if the status of **projection** is `deferred_init`: @@ -4944,22 +5262,20 @@ def add_projection(self, a `feedback` Projection are implemented (in case it has not already been done for the existing Projection). .. note:: - If **projection** is an instantiated Projection (i.e., not in `deferred_init`) and one already exists between - its `sender ` and `receiver ` a warning is generated and - the request is ignored. + If **projection** is an instantiated Projection (i.e., not in `deferred_init`), and one already exists + between its `sender ` and `receiver `, a warning is + generated and the request is ignored. - COMMENT: - IMPLEMENTATION NOTE: - Duplicates are determined by the **Ports** to which they project, not the Mechanisms (to allow - multiple Projections to exist between the same pair of Mechanisms using different Ports). - - + .. technical_note:: + Duplicates are determined by the `Ports ` to which they project, not the `Mechanisms ` + (to allow multiple Projections to exist between the same pair of Mechanisms using different Ports). + .. If an already instantiated Projection is passed to add_projection and is a duplicate of an existing one, - it is detected and suppresed, with a warning, in Port._instantiate_projections_to_port. - - + it is detected and suppressed, with a warning, in Port._instantiate_projections_to_port. + .. If a Projection with deferred_init status is a duplicate, it is fully suppressed here, as these are generated by add_linear_processing_pathway if the pathway overlaps with an existing one, and so warnings are unnecessary and would be confusing to users. - COMMENT Arguments --------- @@ -5000,8 +5316,8 @@ def add_projection(self, # will handle any existing Projections that are in the current Composition below. 
if sender and receiver and projection is None: existing_projections = self._check_for_existing_projections(sender=sender, - receiver=receiver, - in_composition=False) + receiver=receiver, + in_composition=False) if existing_projections: if isinstance(sender, Port): sender_check = sender.owner @@ -5011,7 +5327,10 @@ def add_projection(self, receiver_check = receiver.owner else: receiver_check = receiver - if ((not isinstance(sender_check, CompositionInterfaceMechanism) and sender_check not in self.nodes) + # If either the sender or receiver are not in Composition and are not CompositionInterfaceMechanisms + # remove the Projection and inclusion in relevant Ports + if ((not isinstance(sender_check, CompositionInterfaceMechanism) + and sender_check not in self.nodes) or (not isinstance(receiver_check, CompositionInterfaceMechanism) and receiver_check not in self.nodes)): for proj in existing_projections: @@ -5035,14 +5354,29 @@ def add_projection(self, f"the last of these will be used in {self.name}.") projection = existing_projections[-1] - # FIX: 9/30/19 - Why is this not an else? - # Because above is only for existing Projections outside of Composition, which should be - # used - # But existing one could be within, in which case want to use that one - # existing Projection might be deferred_init, and want t + # If Projection is one that is instantiated and is directly between Nodes in nested Compositions, + # then re-specify it so that the proper routing can be instantiated between those Compositions + # Note: restrict to PathwayProjections, since routing of ModulatoryProjections is handled separately. + elif (isinstance(projection, PathwayProjection_Base) + and projection._initialization_status is ContextFlags.INITIALIZED): + sender_node = projection.sender.owner + receiver_node = projection.receiver.owner + # If sender or receiver is in a nested Node + if ((sender_node not in self.nodes + and sender_node in [n[0] for n in self._get_nested_nodes()]) + or (receiver_node not in self.nodes + and receiver_node in [n[0] for n in self._get_nested_nodes()])): + proj_spec = {PROJECTION_TYPE:projection.className, + PROJECTION_PARAMS:{ + FUNCTION:projection.function, + MATRIX:projection.matrix.base} + } + return self.add_projection(proj_spec, sender=projection.sender, receiver=projection.receiver) + + # Create Projection if it doesn't exist try: # Note: this does NOT initialize the Projection if it is in deferred_init - projection = self._parse_projection_spec(projection, name) + projection = self._instantiate_projection_from_spec(projection, name) except DuplicateProjectionError: # return projection return @@ -5115,7 +5449,7 @@ def add_projection(self, # receiver = cim_target_input_port # FIX: KAM HACK 2/13/19 to get hebbian learning working for PSY/NEU 330 - # Add autoassociative learning mechanism + related projections to composition as processing components + # Add autoassociative learning mechanism + related projections to Composition as processing components if (sender_mechanism != self.input_CIM and sender_mechanism != self.parameter_CIM and sender_mechanism != self.controller @@ -5151,16 +5485,6 @@ def add_projection(self, # Note: do all of the following even if Projection is a existing_projections, # as these conditions should apply to the exisiting one (and it won't hurt to try again if they do) - # Create "shadow" projections to any input ports that are meant to shadow this projection's receiver - # (note: do this even if there is a duplciate and they are not allowed, as still want to 
shadow that projection) - if receiver_mechanism in self.shadows and len(self.shadows[receiver_mechanism]) > 0: - for shadow in self.shadows[receiver_mechanism]: - for input_port in shadow.input_ports: - if input_port.shadow_inputs is not None: - if input_port.shadow_inputs.owner == receiver: - # TBI: Copy the projection type/matrix value of the projection that is being shadowed - self.add_projection(MappingProjection(sender=sender, receiver=input_port), - sender_mechanism, shadow) # if feedback in {True, FEEDBACK}: # self.feedback_senders.add(sender_mechanism) # self.feedback_receivers.add(receiver_mechanism) @@ -5199,8 +5523,12 @@ def _validate_projection(self, raise CompositionError("{}'s receiver assignment [{}] is incompatible with the positions of these " "Components in the Composition.".format(projection, receiver)) - def _parse_projection_spec(self, projection, sender=None, receiver=None, name=None): - if isinstance(projection, (np.ndarray, np.matrix, list)): + def _instantiate_projection_from_spec(self, projection, sender=None, receiver=None, name=None): + if isinstance(projection, dict): + proj_type = projection.pop(PROJECTION_TYPE, None) or MappingProjection + params = projection.pop(PROJECTION_PARAMS, None) + projection = MappingProjection(params=params) + elif isinstance(projection, (np.ndarray, np.matrix, list)): return MappingProjection(matrix=projection, sender=sender, receiver=receiver, name=name) elif isinstance(projection, str): if projection in MATRIX_KEYWORD_VALUES: @@ -5259,12 +5587,13 @@ def _parse_sender_spec(self, projection, sender): else: sender_name = sender.name - # if the sender is IN a nested Composition AND sender is an OUTPUT Node + # if the sender is in a nested Composition AND sender is an OUTPUT Node # then use the corresponding CIM on the nested comp as the sender going forward + # (note: NodeRole.OUTPUT used even for PROBES, since those currently use same output_CIMS as OUTPUT nodes) sender, sender_output_port, graph_sender, sender_mechanism = \ self._get_nested_node_CIM_port(sender_mechanism, - sender_output_port, - NodeRole.OUTPUT) + sender_output_port, + NodeRole.OUTPUT) nested_compositions.append(graph_sender) if sender is None: receiver_name = 'node' @@ -5275,11 +5604,11 @@ def _parse_sender_spec(self, projection, sender): f"or any of its nested {Composition.__name__}s.") if hasattr(projection, "sender"): - if projection.sender.owner != sender and \ - projection.sender.owner != graph_sender and \ - projection.sender.owner != sender_mechanism: - raise CompositionError("The position of {} in {} conflicts with its sender attribute." 
- .format(projection.name, self.name)) + if (projection.sender.owner != sender + and projection.sender.owner != graph_sender + and projection.sender.owner != sender_mechanism): + raise CompositionError(f"The position of {projection.name} in {self.name} " + f"conflicts with its sender ({sender.name}).") return sender, sender_mechanism, graph_sender, nested_compositions @@ -5361,38 +5690,86 @@ def _parse_receiver_spec(self, projection, receiver, sender, learning_projection return receiver, receiver_mechanism, graph_receiver, receiver_input_port, \ nested_compositions, learning_projection - def _get_original_senders(self, input_port, projections): - original_senders = set() - for original_projection in projections: - if original_projection in self.projections: - original_senders.add(original_projection.sender) - correct_sender = original_projection.sender - shadow_found = False - for shadow_projection in input_port.path_afferents: - if shadow_projection.sender == correct_sender: - shadow_found = True - break - if not shadow_found: - # TBI - Shadow projection type? Matrix value? - new_projection = MappingProjection(sender=correct_sender, - receiver=input_port) - self.add_projection(new_projection, sender=correct_sender, receiver=input_port) - return original_senders - def _update_shadow_projections(self, context=None): - for node in self.nodes: - for input_port in node.input_ports: - if input_port.shadow_inputs: - original_senders = self._get_original_senders(input_port, input_port.shadow_inputs.path_afferents) - for shadow_projection in input_port.path_afferents: - if shadow_projection.sender not in original_senders: - self.remove_projection(shadow_projection) + """Instantiate any missing shadow_projections that have been specified in Composition + """ - # MODIFIED 4/4/20 OLD: - # # If the node does not have any roles, it is internal - # if len(self.get_roles_by_node(node)) == 0: - # self._add_node_role(node, NodeRole.INTERNAL) - # MODIFIED 4/4/20 END + # FIX 12/2/21: RENAME input_port -> shadowing_input_port + def _instantiate_missing_shadow_projections(input_port, projections): + """Instantiate shadow Projections that don't yet exist. + + **input_port** is InputPort to receive shadow Projections + **projections** are Projections to be shadowed + + Search recursively (i.e., including in nested Compositions) for receiver(s) of projections. + Instantiate any shadow Projections for them that don't yet exist. + Return actual senders of all shadow Projections. 
+ """ + + def _get_correct_sender(comp, shadowed_projection): + """Search down the hierarchy of nested Compositions for Projection to shadow""" + if shadowed_projection in comp.projections: + return shadowed_projection.sender + else: + # Search for sender in INPUT Nodes of nested Compositions that are themselves INPUT Nodes + nested_input_comps = [nested_comp for nested_comp in comp._get_nested_compositions() + if nested_comp in comp.get_nodes_by_role(NodeRole.INPUT)] + for comp in nested_input_comps: + if shadowed_projection in comp.projections: + return _get_sender_at_right_level(shadowed_projection) + else: + return _get_correct_sender(comp, shadowed_projection) + return None + + def _get_sender_at_right_level(shadowed_proj): + """Search back up hierarchy of nested Compositions for sender at same level as **input_port**""" + if not isinstance(shadowed_proj.sender.owner, CompositionInterfaceMechanism): + raise CompositionError(f"Attempt to shadow the input to a node " + f"({shadowed_proj.receiver.owner.name}) in a nested Composition " + f"of {self.name} that is not an INPUT Node of that Composition is " + f"not currently supported.") + else: + # WANT THIS ONE'S SENDER + # item[0] item[1,0] item[1,1] + # CIM MAP ENTRIES: [SHADOWED PORT, [input_CIM InputPort, input_CIM OutputPort]] + sender_proj = [entry[1][0] + for entry in list(shadowed_proj.sender.owner.port_map.items()) + if entry[1][1] is shadowed_proj.sender][0].path_afferents[0] + if input_port.owner in sender_proj.sender.owner.composition._all_nodes: + return sender_proj.sender + else: + return _get_sender_at_right_level(sender_proj) + + original_senders = set() + for shadowed_projection in projections: + correct_sender = _get_correct_sender(self, shadowed_projection) + if correct_sender: + original_senders.add(correct_sender) + shadow_found = False + for shadow_projection in input_port.path_afferents: + if shadow_projection.sender == correct_sender: + shadow_found = True + break + if not shadow_found: + # TBI - Shadow projection type? Matrix value? + new_projection = MappingProjection(sender=correct_sender, + receiver=input_port) + self.add_projection(new_projection, sender=correct_sender, receiver=input_port) + return original_senders + + for shadowing_port, shadowed_port in self.shadowing_dict.items(): + senders = _instantiate_missing_shadow_projections(shadowing_port, + shadowed_port.path_afferents) + for shadow_projection in shadowing_port.path_afferents: + if shadow_projection.sender not in senders: + self.remove_projection(shadow_projection) + Projection_Base._delete_projection(shadow_projection) + if not shadow_projection.sender.efferents: + if isinstance(shadow_projection.sender.owner, CompositionInterfaceMechanism): + ports = shadow_projection.sender.owner.port_map.pop(shadow_projection.receiver) + shadow_projection.sender.owner.remove_ports(list(ports)) + else: + shadow_projection.sender.owner.remove_ports(shadow_projection.sender) def _check_for_projection_assignments(self, context=None): """Check that all Projections and Ports with require_projection_in_composition attribute are configured. @@ -5425,7 +5802,7 @@ def _check_for_projection_assignments(self, context=None): warnings.warn(f'{Projection.__name__} {projection.name} is missing a receiver.') def get_feedback_status(self, projection): - """Return True if **projection** is designated as a `feedback Projection <_Composition_Feedback_Designation>` + """Return True if **projection** is designated as a `feedback Projection ` in the Composition, else False. 
""" return projection in self.feedback_projections @@ -5535,12 +5912,13 @@ def _check_for_nesting_with_absolute_conditions(self, scheduler, termination_con if warn: warnings.warn(warn_str) + # endregion PROJECTIONS + # ****************************************************************************************************************** - # PATHWAYS + # region ------------------------------------- PATHWAYS ------------------------------------------------------------ # ****************************************************************************************************************** - - # ----------------------------------------- PROCESSING ----------------------------------------------------------- + # region ---------------------------------- PROCESSING ----------------------------------------------------------- # FIX: REFACTOR TO TAKE Pathway OBJECT AS ARGUMENT def add_pathway(self, pathway): @@ -5575,20 +5953,54 @@ def add_pathway(self, pathway): @handle_external_context() def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *args): - """Add sequence of Mechanisms and/or Compositions with intercolated Projections. - - A `MappingProjection` is created for each contiguous pair of `Mechanisms ` and/or Compositions - in the **pathway** argument, from the `primary OutputPort ` of the first one to the - `primary InputPort ` of the second. - - Tuples (Mechanism, `NodeRoles `) can be used to assign `required_roles - ` to Mechanisms. - - Note that any specifications of the **monitor_for_control** `argument - ` of a constructor for a `ControlMechanism` or the **monitor** - argument specified in the constructor for an ObjectiveMechanism in the **objective_mechanism** `argument - ` of a ControlMechanism supercede any MappingProjections that would - otherwise be created for them when specified in the **pathway** argument of add_linear_processing_pathway. + """Add sequence of `Nodes ` with intercolated Projections. + + .. _Composition_Add_Linear_Processing_Pathway: + + Each `Node ` can be either a `Mechanism`, a `Composition`, or a tuple (Mechanism, `NodeRoles + `) that can be used to assign `required_roles` to Mechanisms (see `Composition_Nodes` for additional + details). + + `Projections ` can be intercolated between any pair of `Nodes `. If both Nodes + of a pair are Mechanisms, a single `MappingProjection` can be `specified `. The + same applies if the first Node is a `Composition` with a single `OUTPUT ` Node and/or the + second is a `Composition` with a single `INPUT ` Node. If either has more than one `INPUT + ` or `OUTPUT ` Node, respectively, then a list or set of Projections can be + specified for each pair of nested Nodes. If no `Projection` is specified between a pair of contiguous Nodes, + then default Projection(s) are constructed between them, as follows: + + * *One to one* - if both Nodes are Mechanisms or, if either is a Composition, the first (sender) has + only a single `OUTPUT ` Node and the second (receiver) has only a single `INPUT + ` Node, then a default `MappingProjection` is created from the `primary OutputPort + ` of the sender (or of its sole `OUTPUT ` Node if the sener is a + Composition) to the `primary InputPort ` of the receiver (or of its sole of `INPUT + ` Node if the receiver is a Composition). 
+
+ * *One to many* - if the first Node (sender) is either a Mechanism or a Composition with a single
+ `OUTPUT ` Node, but the second (receiver) is a Composition with more than one
+ `INPUT ` Node, then a `MappingProjection` is created from the `primary OutputPort
+ ` of the sender Mechanism (or of its sole `OUTPUT ` Node if the
+ sender is a Composition) to each `INPUT ` Node of the receiver, and a *set*
+ containing the Projections is intercolated between the two Nodes in the `Pathway`.
+
+ * *Many to one* - if the first Node (sender) is a Composition with more than one `OUTPUT `
+ Node, and the second (receiver) is either a Mechanism or a Composition with a single `INPUT `
+ Node, then a `MappingProjection` is created from each `OUTPUT ` Node of the sender to the
+ `primary InputPort ` of the receiver Mechanism (or of its sole `INPUT `
+ Node if the receiver is a Composition), and a *set* containing the Projections is intercolated
+ between the two Nodes in the `Pathway`.
+
+ * *Many to many* - if both Nodes are Compositions in which the sender has more than one `OUTPUT `
+ Node and the receiver has more than one `INPUT ` Node, it is not possible to determine
+ the correct configuration automatically, and an error is generated. In this case, a set of Projections
+ must be explicitly specified.
+
+ .. note::
+ Any specifications of the **monitor_for_control** `argument `
+ of a constructor for a `ControlMechanism` or the **monitor** argument in the constructor for an
+ `ObjectiveMechanism` in the **objective_mechanism** `argument ` of a
+ ControlMechanism supersede any MappingProjections that would otherwise be created for them when specified
+ in the **pathway** argument of add_linear_processing_pathway.

Arguments
---------

@@ -5596,11 +6008,12 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a

pathway : `Node `, list or `Pathway`
specifies the `Nodes `, and optionally `Projections `, used to construct a processing
`Pathway `. Any standard form of `Pathway specification ` can
- be used, however if a 2-item (Pathway, LearningFunction) tuple is used the `LearningFunction` will be
- ignored (this should be used with `add_linear_learning_pathway` if a `learning Pathway
+ be used, however if a 2-item (Pathway, LearningFunction) tuple is used, the `LearningFunction` is ignored
+ (this should be used with `add_linear_learning_pathway` if a `learning Pathway
` is desired). A `Pathway` object can also be used; again, however, any
- learning-related specifications will be ignored, as will its `name ` if the **name**
+ learning-related specifications are ignored, as are its `name ` if the **name**
argument of add_linear_processing_pathway is specified.
+ See `above ` for additional details.

name : str
species the name used for `Pathway`; supercedes `name ` of `Pathway` object if it is has one.
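For concreteness, a brief usage sketch of the one-to-many case described above (an aside, not part of the patch; Mechanism and Composition names are illustrative):

from psyneulink import Composition, ProcessingMechanism

A = ProcessingMechanism(name='A')
B = ProcessingMechanism(name='B')
C = ProcessingMechanism(name='C')
nested = Composition(name='nested')
nested.add_nodes([B, C])                  # nested has two INPUT Nodes

outer = Composition(name='outer')
# No Projection is specified between A and nested, so default MappingProjections
# are created from A's primary OutputPort to each INPUT Node of nested, and the
# set of those Projections is placed between the two entries of the returned Pathway.
pathway = outer.add_linear_processing_pathway([A, nested])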
@@ -5712,90 +6125,121 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a receiver = pathway[c][0] else: receiver = pathway[c] - proj = self.add_projection(sender=sender, - receiver=receiver) + + # If sender and/or receiver is a Composition with INPUT or OUTPUT Nodes, + # replace it with those Nodes + senders = self._get_nested_nodes_with_same_roles_at_all_levels(sender, NodeRole.OUTPUT) + receivers = self._get_nested_nodes_with_same_roles_at_all_levels(receiver, + NodeRole.INPUT, NodeRole.TARGET) + if senders or receivers: + senders = senders or convert_to_list(sender) + receivers = receivers or convert_to_list(receiver) + if len(senders) > 1 and len(receivers) > 1: + raise CompositionError(f"Pathway specified with two contiguous Compositions, the first of " + f"which {sender.name} has more than one OUTPUT Node and second of" + f"which {receiver.name} has more than one INPUT Node, making the " + f"configuration of Projections between them ambigous. Please " + f"specify those Projections explicity.") + proj = {self.add_projection(sender=s, receiver=r, allow_duplicates=False) + for r in receivers for s in senders} + else: + proj = self.add_projection(sender=sender, receiver=receiver) if proj: projections.append(proj) # if the current item is a Projection specification elif _is_pathway_entry_spec(pathway[c], PROJECTION): - if c == len(pathway) - 1: - raise CompositionError(f"The last item in the {pathway_arg_str} cannot be a Projection: " - f"{pathway[c]}.") - # confirm that it is between two nodes, then add the projection - if isinstance(pathway[c], tuple): - proj = pathway[c][0] - feedback = pathway[c][1] + # Convert pathway[c] to list (embedding in one if matrix) for consistency of handling below + # try: + # proj_specs = set(convert_to_list(pathway[c])) + # except TypeError: + # proj_specs = [pathway[c]] + if is_numeric(pathway[c]): + proj_specs = [pathway[c]] else: - proj = pathway[c] - feedback = False - sender = pathway[c - 1] - receiver = pathway[c + 1] - if _is_node_spec(sender) and _is_node_spec(receiver): - if isinstance(sender, tuple): - sender = sender[0] - if isinstance(receiver, tuple): - receiver = receiver[0] - try: - if isinstance(proj, (np.ndarray, np.matrix, list)): - # If proj is a matrix specification, use it as the matrix arg - proj = MappingProjection(sender=sender, - matrix=proj, - receiver=receiver) - else: - # Otherwise, if it is Port specification, implement default Projection - try: - if isinstance(proj, InputPort): - proj = MappingProjection(sender=sender, - receiver=proj) - elif isinstance(proj, OutputPort): - proj = MappingProjection(sender=proj, - receiver=receiver) - except (InputPortError, ProjectionError) as error: - # raise CompositionError(f"Bad Projection specification in {pathway_arg_str}: {proj}.") - raise ProjectionError(str(error.error_value)) - - except (InputPortError, ProjectionError, MappingError) as error: + proj_specs = convert_to_list(pathway[c]) + proj_set = [] + for proj_spec in proj_specs: + if c == len(pathway) - 1: + raise CompositionError(f"The last item in the {pathway_arg_str} cannot be a Projection: " + f"{proj_spec}.") + # confirm that it is between two nodes, then add the projection + if isinstance(proj_spec, tuple): + proj = proj_spec[0] + feedback = proj_spec[1] + else: + proj = proj_spec + feedback = False + sender = pathway[c - 1] + receiver = pathway[c + 1] + if _is_node_spec(sender) and _is_node_spec(receiver): + if isinstance(sender, tuple): + sender = sender[0] + if isinstance(receiver, 
tuple): + receiver = receiver[0] + try: + if isinstance(proj, (np.ndarray, np.matrix, list)): + # If proj is a matrix specification, use it as the matrix arg + proj = MappingProjection(sender=sender, + matrix=proj, + receiver=receiver) + else: + # Otherwise, if it is Port specification, implement default Projection + try: + if isinstance(proj, InputPort): + proj = MappingProjection(sender=sender, + receiver=proj) + elif isinstance(proj, OutputPort): + proj = MappingProjection(sender=proj, + receiver=receiver) + except (InputPortError, ProjectionError) as error: + raise ProjectionError(str(error.error_value)) + + except (InputPortError, ProjectionError, MappingError) as error: raise CompositionError(f"Bad Projection specification in {pathway_arg_str} ({proj}): " f"{str(error.error_value)}") - except DuplicateProjectionError: - # FIX: 7/22/19 ADD WARNING HERE?? - # FIX: 7/22/19 MAKE THIS A METHOD ON Projection?? - duplicate = [p for p in receiver.afferents if p in sender.efferents] - assert len(duplicate)==1, \ - f"PROGRAM ERROR: Could not identify duplicate on DuplicateProjectionError " \ - f"for {Projection.__name__} between {sender.name} and {receiver.name} " \ - f"in call to {repr('add_linear_processing_pathway')} for {self.name}." - duplicate = duplicate[0] - warning_msg = f"Projection specified between {sender.name} and {receiver.name} " \ - f"in {pathway_arg_str} is a duplicate of one" - # IMPLEMENTATION NOTE: Version that allows different Projections between same - # sender and receiver in different Compositions - # if duplicate in self.projections: - # warnings.warn(f"{warning_msg} already in the Composition ({duplicate.name}) " - # f"and so will be ignored.") - # proj=duplicate - # else: - # if self.prefs.verbosePref: - # warnings.warn(f" that already exists between those nodes ({duplicate.name}). The " - # f"new one will be used; delete it if you want to use the existing one") - # Version that forbids *any* duplicate Projections between same sender and receiver - warnings.warn(f"{warning_msg} that already exists between those nodes ({duplicate.name}) " - f"and so will be ignored.") - proj=duplicate - - proj = self.add_projection(projection=proj, - sender=sender, - receiver=receiver, - feedback=feedback, - allow_duplicates=False) - if proj: - projections.append(proj) - + except DuplicateProjectionError: + # FIX: 7/22/19 ADD WARNING HERE?? + # FIX: 7/22/19 MAKE THIS A METHOD ON Projection?? + duplicate = [p for p in receiver.afferents if p in sender.efferents] + assert len(duplicate)==1, \ + f"PROGRAM ERROR: Could not identify duplicate on DuplicateProjectionError " \ + f"for {Projection.__name__} between {sender.name} and {receiver.name} " \ + f"in call to {repr('add_linear_processing_pathway')} for {self.name}." + duplicate = duplicate[0] + warning_msg = f"Projection specified between {sender.name} and {receiver.name} " \ + f"in {pathway_arg_str} is a duplicate of one" + # IMPLEMENTATION NOTE: Version that allows different Projections between same + # sender and receiver in different Compositions + # if duplicate in self.projections: + # warnings.warn(f"{warning_msg} already in the Composition ({duplicate.name}) " + # f"and so will be ignored.") + # proj=duplicate + # else: + # if self.prefs.verbosePref: + # warnings.warn(f" that already exists between those nodes ({duplicate.name}). 
The " + # f"new one will be used; delete it if you want to use the existing one") + # Version that forbids *any* duplicate Projections between same sender and receiver + warnings.warn(f"{warning_msg} that already exists between those nodes ({duplicate.name}) " + f"and so will be ignored.") + proj=duplicate + + proj = self.add_projection(projection=proj, + sender=sender, + receiver=receiver, + feedback=feedback, + allow_duplicates=False) + if proj: + proj_set.append(proj) + else: + raise CompositionError(f"A Projection specified in {pathway_arg_str} " + f"is not between two Nodes: {pathway[c]}") + if len(proj_set) == 1: + projections.append(proj_set[0]) else: - raise CompositionError(f"A Projection specified in {pathway_arg_str} " - f"is not between two Nodes: {pathway[c]}") + projections.append(proj_set) + else: raise CompositionError(f"An entry in {pathway_arg_str} is not a Node (Mechanism or Composition) " f"or a Projection: {repr(pathway[c])}.") @@ -5843,11 +6287,7 @@ def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *a context=context) self.pathways.append(pathway) - # FIX 4/4/20 [JDC]: Reset to None for now to replicate prior behavior, - # but need to implement proper behavior wrt call to analyze_graph() - # _check_initalization_state() - # 10/22/20 [KDM]: Pass no context instead of setting to None - self._analyze_graph() + self._analyze_graph(context) return pathway @@ -5905,12 +6345,8 @@ def add_pathways(self, pathways, context=None): pathways = convert_to_list(pathways) # Possibility 2 (list is a single pathway spec): - # # MODIFIED 5/17/20 OLD: - # if isinstance(pathways, list) and all(_is_node_spec(p) for p in pathways): - # MODIFIED 5/17/20 NEW: if (isinstance(pathways, list) and _is_node_spec(pathways[0]) and all(_is_pathway_entry_spec(p, ANY) for p in pathways)): - # MODIFIED 5/17/20 END # Place in outter list (to conform to processing of multiple pathways below) pathways = [pathways] # If pathways is not now a list it must be illegitimate @@ -5999,7 +6435,9 @@ def identify_pway_type_and_parse_tuple_prn(pway, tuple_or_dict_str): return added_pathways - # ------------------------------------------ LEARNING ------------------------------------------------------------ + # endregion PROCESSING PATHWAYS + + # region ------------------------------------ LEARNING ------------------------------------------------------------- @handle_external_context() def add_linear_learning_pathway(self, @@ -6008,11 +6446,7 @@ def add_linear_learning_pathway(self, loss_function=None, learning_rate:tc.any(int,float)=0.05, error_function=LinearCombination, - # # MODIFIED 5/25/20 OLD: - # learning_update:tc.any(bool, tc.enum(ONLINE, AFTER))=ONLINE, - # MODIFIED 5/25/20 NEW: learning_update:tc.any(bool, tc.enum(ONLINE, AFTER))=AFTER, - # MODIFIED 5/25/20 END name:str=None, context=None): """Implement learning pathway (including necessary `learning components `. @@ -6391,7 +6825,7 @@ def _create_learning_components(self, **kwargs # Use of type-specific learning arguments ): - # ONLY DO THIS IF ONE DOESN'T ALREADY EXIST (?pass in argument determing this?) + # ONLY DO THIS IF ONE DOESN'T ALREADY EXIST (?pass in argument determining this?) 
learning_mechanism = LearningMechanism(function=learning_function, default_variable=[sender_activity_source.output_ports[0].value, receiver_activity_source.output_ports[0].value, @@ -6454,6 +6888,9 @@ def _create_learning_related_mechanisms(self, learning_enabled=learning_update, in_composition=True, name="Learning Mechanism for " + learned_projection.name) + + objective_mechanism.modulatory_mechanism = learning_mechanism + else: raise CompositionError(f"'learning_function' argument of add_linear_learning_pathway " f"({learning_function}) must be a class of {LearningFunction.__name__} or a " @@ -6529,6 +6966,8 @@ def _create_rl_related_mechanisms(self, in_composition=True, name="Learning Mechanism for " + learned_projection.name) + objective_mechanism.modulatory_mechanism = learning_mechanism + return target_mechanism, objective_mechanism, learning_mechanism def _create_td_related_mechanisms(self, @@ -6759,7 +7198,8 @@ def bfs(start): p.insert(0, curr_node) curr_node = prev[curr_node] p.insert(0, curr_node) - # we only consider input -> projection -> ... -> output pathways (since we can't learn on only one mechanism) + # we only consider input -> projection -> ... -> output pathways + # (since we can't learn on only one mechanism) if len(p) >= 3: pathways.append(p) continue @@ -6826,6 +7266,8 @@ def _create_terminal_backprop_learning_components(self, in_composition=True, name="Learning Mechanism for " + learned_projection.name) + objective_mechanism.modulatory_mechanism = learning_mechanism + self.add_nodes(nodes=[(target_mechanism, NodeRole.TARGET), (objective_mechanism, NodeRole.LEARNING_OBJECTIVE), learning_mechanism], @@ -6863,7 +7305,9 @@ def _create_non_terminal_backprop_learning_components(self, # error_sources will be empty (as they have been dealt with in self._get_back_prop_error_sources # error_projections will contain list of any created to be added to the Composition below if learning_mechanism: - error_sources, error_projections = self._get_back_prop_error_sources(output_source, learning_mechanism, context) + error_sources, error_projections = self._get_back_prop_error_sources(output_source, + learning_mechanism, + context) # If learning_mechanism does not yet exist: # error_sources will contain ones needed to create learning_mechanism # error_projections will be empty since they can't be created until the learning_mechanism is created below; @@ -7057,19 +7501,27 @@ def _get_deeply_nested_aux_projections(self, node): aux_projections[i] = i nested_nodes = self._get_nested_nodes() for spec, proj in aux_projections.items(): - if proj.receiver.owner not in self.nodes and \ - proj.receiver.owner in [i[0] for i in nested_nodes if not i[1] in self.nodes]: + # FIX: TREATMENT OF RECEIVERS SEEMS TO DEAL WITH ONLY RECEIVERS IN COMPS NESTED MORE THAN ON LEVEL DEEP + # REMOVING "if not i[1] in self.nodes" crashes in test_multilevel_control + if ((proj.sender.owner not in self.nodes + and proj.sender.owner in [i[0] for i in nested_nodes]) + or (proj.receiver.owner not in self.nodes + and proj.receiver.owner in [i[0] for i in nested_nodes if not i[1] in self.nodes])): deeply_nested_projections[spec] = proj return deeply_nested_projections + # endregion LEARNING PATHWAYS + + # endregion PATHWAYS + # ****************************************************************************************************************** - # CONTROL + # region ------------------------------------- CONTROL ------------------------------------------------------------- # 
****************************************************************************************************************** @handle_external_context() def add_controller(self, controller:ControlMechanism, context=None): """ - Add an `ControlMechanism` as the `controller ` of the Composition. + Add a `ControlMechanism` as the `controller ` of the Composition. This gives the ControlMechanism access to the `Composition`'s `evaluate ` method. This allows subclasses of ControlMechanism that can use this (such as `OptimizationControlMechanism`) to execute @@ -7081,13 +7533,15 @@ def add_controller(self, controller:ControlMechanism, context=None): `, and a `ControlProjection` to its correponding `ParameterPort`. The ControlMechanism is assigned the `NodeRole` `CONTROLLER`. - """ if not isinstance(controller, ControlMechanism): raise CompositionError(f"Specification of {repr(CONTROLLER)} arg for {self.name} " f"must be a {repr(ControlMechanism.__name__)} ") + # Call with context to avoid recursion by analyze_graph -> _check_initialization_status -> add_controller + context.source = ContextFlags.METHOD + # VALIDATE AND ADD CONTROLLER # Note: initialization_status here pertains to controller's own initialization status @@ -7114,79 +7568,51 @@ def add_controller(self, controller:ControlMechanism, context=None): f"for another {COMPOSITION} ({controller.composition.name}); assignment ignored.") return - # Warn if current one is being replaced, and remove Projections for old one + # Remove existing controller if there is one if self.controller: + # Warn if current one is being replaced if self.prefs.verbosePref: warnings.warn(f"The existing {CONTROLLER} for {self.name} ({self.controller.name}) " f"is being replaced by {controller.name}.") - for proj in self.projections: + # Remove Projections for old one + for proj in self.projections.copy(): if (proj in self.controller.afferents or proj in self.controller.efferents): self.remove_projection(proj) + Projection_Base._delete_projection(proj) self.controller.composition=None + # Assign mutual references between Composition and controller controller.composition = self self.controller = controller # Having controller in nodes is not currently supported (due to special handling of scheduling/execution); # its NodeRole assignment is handled directly by the get_nodes_by_role and get_roles_by_node methods. 
# self._add_node_role(controller, NodeRole.CONTROLLER) - # ADD AUX_COMPONENTS RELEVANT TO CONTROLLER - + # Check aux_components relevant to controller invalid_aux_components = self._get_invalid_aux_components(controller) - if invalid_aux_components: self._controller_initialization_status = ContextFlags.DEFERRED_INIT + return - if self.controller.objective_mechanism and self.controller.objective_mechanism not in invalid_aux_components: - self.add_node(self.controller.objective_mechanism, required_roles=NodeRole.CONTROLLER_OBJECTIVE) + # ADD MONITORING COMPONENTS ----------------------------------------------------- - self.node_ordering.append(controller) + self._handle_allow_probes_for_control(self.controller) - self.enable_controller = True + if self.controller.objective_mechanism: + # If controller has objective_mechanism, then add it and all associated Projections to Composition + if self.controller.objective_mechanism not in invalid_aux_components: + self.controller._validate_monitor_for_control(self._get_all_nodes()) + self.add_node(self.controller.objective_mechanism, required_roles=NodeRole.CONTROLLER_OBJECTIVE) + else: + # Otherwise, if controller has any afferent inputs (from items in monitor_for_control), add them + if self.controller.input_ports and self.controller.input_port.path_afferents: + self._add_node_aux_components(controller, context) - controller._activate_projections_for_compositions(self) - # Call with context to avoid recursion by analyze_graph -> _check_inialization_status -> add_controller - context.source = ContextFlags.METHOD - self._analyze_graph(context=context) - self._update_shadows_dict(controller) - - # INSTANTIATE SHADOW_INPUT PROJECTIONS - # Skip controller's first (OUTCOME) input_port (that receives the Projection from its objective_mechanism - nested_cims = [comp.input_CIM for comp in self._get_nested_compositions()] - input_cims= [self.input_CIM] + nested_cims - # For the rest of the controller's input_ports if they are marked as receiving SHADOW_INPUTS, - # instantiate the shadowing Projection to them from the sender to the shadowed InputPort - for input_port in controller.input_ports[1:]: - if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: - for proj in input_port.shadow_inputs.path_afferents: - try: - sender = proj.sender - if sender.owner not in input_cims: - self.add_projection(projection=MappingProjection(sender=sender, receiver=input_port), - sender=sender.owner, - receiver=controller) - shadow_proj._activate_for_compositions(self) - else: - if not sender.owner.composition == self: - sender_input_cim = sender.owner - proj_index = sender_input_cim.output_ports.index(sender) - sender_corresponding_input_projection = sender_input_cim.input_ports[ - proj_index].path_afferents[0] - input_projection_sender = sender_corresponding_input_projection.sender - if input_projection_sender.owner == self.input_CIM: - shadow_proj = MappingProjection(sender=input_projection_sender, - receiver = input_port) - shadow_proj._activate_for_compositions(self) - else: - shadow_proj = MappingProjection(sender=proj.sender, receiver=input_port) - shadow_proj._activate_for_compositions(self) - except DuplicateProjectionError: - continue - for proj in input_port.path_afferents: - if proj.sender.owner not in nested_cims: - proj._activate_for_compositions(self) + # This is set by add_node() automatically if there is an objective_mechanism; + # needs to be set here to insure call at run time (to catch any new nodes that may have been added) + 
self.needs_update_controller = True - # Check whether controller has input, and if not then disable + # Confirm that controller has input, and if not then disable it if not (isinstance(self.controller.input_ports, ContentAddressableList) and self.controller.input_ports): # If controller was enabled, warn that it has been disabled @@ -7196,32 +7622,82 @@ def add_controller(self, controller:ControlMechanism, context=None): self.enable_controller = False return - # ADD ANY ControlSignals SPECIFIED BY NODES IN COMPOSITION + # ADD MODULATORY COMPONENTS ----------------------------------------------------- # Get rid of default ControlSignal if it has no ControlProjections controller._remove_default_control_signal(type=CONTROL_SIGNAL) + # Instantiate control specifications locally (on nodes) and/or on controller + self._instantiate_control_projections(context=context) + # Instantiate any + for node in self.nodes: + self._instantiate_deferred_init_control(node, context) - # Add any ControlSignals specified for ParameterPorts of Nodes already in the Composition - control_signal_specs = self._get_control_signals_for_composition() - for ctl_sig_spec in control_signal_specs: - # FIX: 9/14/19: THIS SHOULD BE HANDLED IN _instantiate_projection_to_port - # CALLED FROM _instantiate_control_signal - # SHOULD TRAP THAT ERROR AND GENERATE CONTEXT-APPROPRIATE ERROR MESSAGE - # Don't add any that are already on the ControlMechanism + if RANDOMIZATION_CONTROL_SIGNAL not in self.controller.output_ports.names: + try: + self.controller._create_randomization_control_signal(context) + except AttributeError: + # ControlMechanism does not use RANDOMIZATION_CONTROL_SIGNAL + pass + else: + self.controller.function.parameters.randomization_dimension._set( + self.controller.output_ports.names.index(RANDOMIZATION_CONTROL_SIGNAL), + context + ) - # FIX: 9/14/19 - IS THE CONTEXT CORRECT (TRY TRACKING IN SYSTEM TO SEE WHAT CONTEXT IS): - ctl_signal = controller._instantiate_control_signal(control_signal=ctl_sig_spec, - context=context) - controller.control.append(ctl_signal) - # FIX: 9/15/19 - WHAT IF NODE THAT RECEIVES ControlProjection IS NOT YET IN COMPOSITON: - # ?DON'T ASSIGN ControlProjection? - # ?JUST DON'T ACTIVATE IT FOR COMPOSITON? - # ?PUT IT IN aux_components FOR NODE? - # ! TRACE THROUGH _activate_projections_for_compositions TO SEE WHAT IT CURRENTLY DOES - controller._activate_projections_for_compositions(self) + # ACTIVATE FOR COMPOSITION ----------------------------------------------------- + + self.node_ordering.append(controller) + self.enable_controller = True + # FIX: 11/15/21 - SHOULD THIS METHOD BE MOVED HERE (TO COMPOSITION) FROM ControlMechanism + controller._activate_projections_for_compositions(self) + self._analyze_graph(context=context) if not invalid_aux_components: self._controller_initialization_status = ContextFlags.INITIALIZED - self._analyze_graph(context=context) + + def _instantiate_deferred_init_control(self, node, context=None): + """ + If node is a Composition with a controller, activate its nodes' deferred init control specs for its controller. + If it does not have a controller, but self does, activate them for self's controller. + + If node is a Node that has deferred init control specs and self has a controller, activate the deferred init + control specs for self's controller. + + Called recursively on nodes that are nested Compositions. + + Returns + ------- + + list of hanging control specs that were not able to be assigned for a controller at any level of nesting. 
+ + """ + hanging_control_specs = [] + if node.componentCategory == 'Composition': + nested_comp = node # For readability + for node_in_nested_comp in nested_comp.nodes: + hanging_control_specs.extend(nested_comp._instantiate_deferred_init_control(node_in_nested_comp, + context=context)) + assert True + else: + hanging_control_specs = node._get_parameter_port_deferred_init_control_specs() + if not self.controller: + return hanging_control_specs + else: + for spec in hanging_control_specs: + control_signal = self.controller._instantiate_control_signal(control_signal=spec, + context=context) + self.controller.control.append(control_signal) + self.controller._activate_projections_for_compositions(self) + return [] + + def _get_monitor_for_control_nodes(self): + """Return dict of {nodes : ControlMechanism that monitors it} for any nodes monitored for control in Composition + """ + monitored_nodes = {} + for node in self._all_nodes: + if isinstance(node, ControlMechanism): + monitored_nodes.update({spec.owner if isinstance(spec, Port) else spec : node + for spec in node.monitor_for_control}) + return monitored_nodes def _get_control_signals_for_composition(self): """Return list of ControlSignals specified by Nodes in the Composition @@ -7251,82 +7727,6 @@ def _get_control_signals_for_composition(self): control_signal_specs.extend(node._get_parameter_port_deferred_init_control_specs()) return control_signal_specs - def _build_predicted_inputs_dict(self, predicted_input): - inputs = {} - # ASSUMPTION: input_ports[0] is NOT a feature and input_ports[1:] are state_features - # If this is not a good assumption, we need another way to look up the feature InputPorts - # of the OCM and know which InputPort maps to which predicted_input value - - nested_nodes = dict(self._get_nested_nodes()) - for j in range(len(self.controller.input_ports) - 1): - input_port = self.controller.input_ports[j + 1] - if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: - owner = input_port.shadow_inputs.owner - if self._controller_initialization_status == ContextFlags.DEFERRED_INIT \ - and owner not in nested_nodes \ - and owner not in self.nodes: - continue - if owner not in nested_nodes: - shadow_input_owner = input_port.shadow_inputs.owner - if isinstance(shadow_input_owner, CompositionInterfaceMechanism): - shadow_input_owner = shadow_input_owner.composition - inputs[shadow_input_owner] = predicted_input[j] - else: - comp = nested_nodes[owner] - if comp not in inputs: - inputs[comp]=[[predicted_input[j]]] - else: - inputs[comp]=np.concatenate([[predicted_input[j]],inputs[comp][0]]) - return inputs - - def _get_invalid_aux_components(self, controller): - valid_nodes = \ - [node for node in self.nodes.data] + \ - [node for node, composition in self._get_nested_nodes()] + \ - [self] - if self.controller: - valid_nodes.append(self.controller) - if hasattr(self.controller,'objective_mechanism'): - valid_nodes.append(self.controller.objective_mechanism) - invalid_components = [] - for aux in controller.aux_components: - component = None - if isinstance(aux, Projection): - component = aux - elif hasattr(aux, '__iter__'): - for i in aux: - if isinstance(i, Projection): - component = i - elif isinstance(i, Mechanism): - if self._get_invalid_aux_components(i): - invalid_components.append(i) - elif isinstance(aux, Mechanism): - if self._get_invalid_aux_components(aux): - invalid_components.append(aux) - if not component: - continue - if isinstance(component, Projection): - if hasattr(component.sender, 
OWNER_MECH): - sender_node = component.sender.owner_mech - else: - if isinstance(component.sender.owner, CompositionInterfaceMechanism): - sender_node = component.sender.owner.composition - else: - sender_node = component.sender.owner - if hasattr(component.receiver, OWNER_MECH): - receiver_node = component.receiver.owner_mech - else: - if isinstance(component.receiver.owner, CompositionInterfaceMechanism): - receiver_node = component.receiver.owner.composition - else: - receiver_node = component.receiver.owner - if not all([sender_node in valid_nodes, receiver_node in valid_nodes]): - invalid_components.append(component) - if invalid_components: - return invalid_components - else: - return [] - def reshape_control_signal(self, arr): current_shape = np.shape(arr) @@ -7337,90 +7737,153 @@ def reshape_control_signal(self, arr): return np.array(arr) - def _get_total_cost_of_control_allocation(self, control_allocation, context, runtime_params): - total_cost = 0. - if control_allocation is not None: # using "is not None" in case the control allocation is 0. + def _instantiate_control_projections(self, context): + """ + Add any ControlProjections for control specified locally on nodes in Composition + """ - base_control_allocation = self.reshape_control_signal(self.controller.parameters.value._get(context)) + # Add any ControlSignals specified for ParameterPorts of Nodes already in the Composition + control_signal_specs = self._get_control_signals_for_composition() + for ctl_sig_spec in control_signal_specs: + # FIX: 9/14/19: THIS SHOULD BE HANDLED IN _instantiate_projection_to_port + # CALLED FROM _instantiate_control_signal + # SHOULD TRAP THAT ERROR AND GENERATE CONTEXT-APPROPRIATE ERROR MESSAGE + # Don't add any that are already on the ControlMechanism - candidate_control_allocation = self.reshape_control_signal(control_allocation) + # FIX: 9/14/19 - IS THE CONTEXT CORRECT (TRY TRACKING IN SYSTEM TO SEE WHAT CONTEXT IS): + ctl_signal = self.controller._instantiate_control_signal(control_signal=ctl_sig_spec, context=context) - # Get reconfiguration cost for candidate control signal - reconfiguration_cost = 0. - if callable(self.controller.compute_reconfiguration_cost): - reconfiguration_cost = self.controller.compute_reconfiguration_cost([candidate_control_allocation, - base_control_allocation]) - self.controller.reconfiguration_cost.set(reconfiguration_cost, context) + self.controller.control.append(ctl_signal) - # Apply candidate control signal - self.controller._apply_control_allocation(candidate_control_allocation, - context=context, - runtime_params=runtime_params, - ) - - # Get control signal costs - other_costs = self.controller.parameters.costs._get(context) or [] - all_costs = convert_to_np_array(other_costs + [reconfiguration_cost]) - # Compute a total for the candidate control signal(s) - total_cost = self.controller.combine_costs(all_costs) - return total_cost + # MODIFIED 11/21/21 OLD: FIX: WHY IS THIS INDENTED? WON'T CALL OUTSIDE LOOP ACTIVATE ALL PROJECTIONS? + # FIX: 9/15/19 - WHAT IF NODE THAT RECEIVES ControlProjection IS NOT YET IN COMPOSITION: + # ?DON'T ASSIGN ControlProjection? + # ?JUST DON'T ACTIVATE IT FOR COMPOSITON? + # ?PUT IT IN aux_components FOR NODE? + # ! 
TRACE THROUGH _activate_projections_for_compositions TO SEE WHAT IT CURRENTLY DOES + self.controller._activate_projections_for_compositions(self) + + def _route_control_projection_through_intermediary_pcims(self, + projection, + sender, + sender_mechanism, + receiver, + graph_receiver, + context): + """ + Takes as input a specification for a projection to a parameter port that is nested n-levels below its sender, + instantiates and activates ports and projections on intermediary pcims, and returns a new + projection specification from the original sender to the relevant input port of the pcim of the Composition + located in the same level of nesting. + """ + for proj in receiver.mod_afferents: + if proj.sender.owner == sender_mechanism: + receiver._remove_projection_to_port(proj) + for proj in sender.efferents: + if proj.receiver == receiver: + sender._remove_projection_from_port(proj) + modulation = sender.modulation + interface_input_port = InputPort(owner=graph_receiver.parameter_CIM, + variable=receiver.defaults.value, + reference_value=receiver.defaults.value, + name=PARAMETER_CIM_NAME + "_" + receiver.owner.name + "_" + receiver.name, + context=context) + graph_receiver.parameter_CIM.add_ports([interface_input_port], context=context) + # control signal for parameter CIM that will project directly to inner Composition's parameter + control_signal = ControlSignal( + modulation=modulation, + variable=(OWNER_VALUE, functools.partial(graph_receiver.parameter_CIM.get_input_port_position, + interface_input_port)), + transfer_function=Identity, + modulates=receiver, + name=PARAMETER_CIM_NAME + "_" + receiver.owner.name + "_" + receiver.name, + ) + if receiver.owner not in graph_receiver.nodes.data + graph_receiver.cims: + receiver = interface_input_port + graph_receiver.parameter_CIM.add_ports([control_signal], context=context) + # add sender and receiver to self.parameter_CIM_ports dict + for p in control_signal.projections: + # self.add_projection(p) + graph_receiver.add_projection(p, receiver=p.receiver, sender=control_signal) + try: + sender._remove_projection_to_port(projection) + except ValueError: + pass + try: + receiver._remove_projection_from_port(projection) + except ValueError: + pass + receiver = interface_input_port + return MappingProjection(sender=sender, receiver=receiver) - def _check_projection_initialization_status(self, context=None): - """Checks initialization status of controller (if applicable) and any projections or ports + def _check_controller_initialization_status(self, context=None): + """Checks initialization status of controller (if applicable) all Projections or Ports in the Composition """ # Avoid recursion if called from add_controller (by way of analyze_graph) since that is called below if context and context.source == ContextFlags.METHOD: return - # Check if controller is in deferred init + # If controller is in deferred init, try to instantiate and add it to Composition if self.controller and self._controller_initialization_status == ContextFlags.DEFERRED_INIT: self.add_controller(self.controller, context=context) - # Don't bother checking any further if from COMMAND_LINE or COMPOSITION (i.e., anything other than Run) - # since no need to detect deferred_init and generate errors until runtime - if context and context.source in {ContextFlags.COMMAND_LINE, ContextFlags.COMPOSITION}: - return + # Don't bother checking any further if from COMMAND_LINE or COMPOSITION (i.e., anything other than Run) + # since no need to detect deferred_init and generate errors 
until runtime + if context and context.source in {ContextFlags.COMMAND_LINE, ContextFlags.COMPOSITION}: + return - if self._controller_initialization_status == ContextFlags.DEFERRED_INIT: - invalid_aux_components = self._get_invalid_aux_components(self.controller) - for component in invalid_aux_components: - if isinstance(component, Projection): - if hasattr(component.receiver, OWNER_MECH): - owner = component.receiver.owner_mech - else: - owner = component.receiver.owner - warnings.warn( - f"The controller of {self.name} has been specified to project to {owner.name}, " - f"but {owner.name} is not in {self.name} or any of its nested Compositions. " - f"This projection will be deactivated until {owner.name} is added to {self.name} " - f"in a compatible way." - ) - elif isinstance(component, Mechanism): - warnings.warn( - f"The controller of {self.name} has a specification that includes the Mechanism " - f"{component.name}, but {component.name} is not in {self.name} or any of its " - f"nested Compositions. This Mechanism will be deactivated until {component.name} is " - f"added to {self.name} or one of its nested Compositions in a compatible way." - ) + # Check for Mechanisms and Projections in aux_components + if self._controller_initialization_status == ContextFlags.DEFERRED_INIT: + invalid_aux_components = self._get_invalid_aux_components(self.controller) + for component in invalid_aux_components: + if isinstance(component, Projection): + if hasattr(component.receiver, OWNER_MECH): + owner = component.receiver.owner_mech + else: + owner = component.receiver.owner + warnings.warn( + f"The controller of {self.name} has been specified to project to {owner.name}, " + f"but {owner.name} is not in {self.name} or any of its nested Compositions. " + f"This projection will be deactivated until {owner.name} is added to {self.name} " + f"in a compatible way." + ) + elif isinstance(component, Mechanism): + warnings.warn( + f"The controller of {self.name} has a specification that includes the Mechanism " + f"{component.name}, but {component.name} is not in {self.name} or any of its " + f"nested Compositions. This Mechanism will be deactivated until {component.name} is " + f"added to {self.name} or one of its nested Compositions in a compatible way." + ) # If Composition is not preparing to execute, allow deferred_inits to persist without warning if context and ContextFlags.PREPARING not in context.execution_phase: return + # Check for deferred init ControlProjections for node in self.nodes: - # Check for deferred init projections for projection in node.projections: if projection.initialization_status == ContextFlags.DEFERRED_INIT: - # NOTE: - # May want to add other conditions and warnings here. Currently - # just checking for unresolved control projections. if isinstance(projection, ControlProjection): warnings.warn(f"The {projection.receiver.name} parameter of {projection.receiver.owner.name} \n" f"is specified for control, but {self.name} does not have a controller. Please \n" f"add a controller to {self.name} or the control specification will be \n" f"ignored.") + def _check_nodes_initialization_status(self, context=None): + + # Avoid recursion if called from add_controller (by way of analyze_graph) since that is called below. + # Don't bother checking if from COMMAND_LINE or COMPOSITION (i.e., anything other than Run) + # since no need to detect deferred_init and generate errors until runtime. 
+ # If Composition is not preparing to execute, allow deferred_inits to persist without warning + if context and (context.source in {ContextFlags.METHOD, ContextFlags.COMMAND_LINE, ContextFlags.COMPOSITION} + or ContextFlags.PREPARING not in context.execution_phase): + return + + # NOTE: + # May want to add other conditions and warnings here. + # Currently just checking for unresolved projections. + for node in self._partially_added_nodes: for proj in self._get_invalid_aux_components(node): receiver = proj.receiver.owner @@ -7431,11 +7894,87 @@ def _check_projection_initialization_status(self, context=None): f"or a composition nested within it." ) + # FIX: 11/3/21 ??GET RID OF THIS AND CALL TO IT ONCE PROJECTIONS HAVE BEEN IMPLEMENTED FOR SHADOWED INPUTS + # CHECK WHETHER state_input_ports ADD TO OR REPLACE shadowed_inputs + def _build_predicted_inputs_dict(self, predicted_input): + """Get inputs for evaluate method used to execute simulations of Composition. + + Get values of state_input_ports which receive projections from items providing relevant input (and any + processing of those values specified + """ + inputs = {} + no_predicted_input = (predicted_input is None or not len(predicted_input)) + if no_predicted_input: + warnings.warn(f"{self.name}.evaluate() called without any inputs specified; default values will be used") + + nested_nodes = dict(self._get_nested_nodes()) + # FIX: 11/3/21 NEED TO MODIFY IF OUTCOME InputPorts ARE MOVED + shadow_inputs_start_index = self.controller.num_outcome_input_ports + for j in range(len(self.controller.input_ports) - shadow_inputs_start_index): + input_port = self.controller.input_ports[j + shadow_inputs_start_index] + if no_predicted_input: + shadowed_input = input_port.defaults.value + else: + shadowed_input = predicted_input[j] + + if hasattr(input_port, SHADOW_INPUTS) and input_port.shadow_inputs is not None: + shadow_input_owner = input_port.shadow_inputs.owner + if self._controller_initialization_status == ContextFlags.DEFERRED_INIT \ + and shadow_input_owner not in nested_nodes \ + and shadow_input_owner not in self.nodes: + continue + if shadow_input_owner not in nested_nodes: + if isinstance(shadow_input_owner, CompositionInterfaceMechanism): + shadow_input_owner = shadow_input_owner.composition + inputs[shadow_input_owner] = shadowed_input + else: + comp = nested_nodes[shadow_input_owner] + if comp not in inputs: + inputs[comp]=[[shadowed_input]] + else: + inputs[comp]=np.concatenate([[shadowed_input],inputs[comp][0]]) + return inputs + + def _get_total_cost_of_control_allocation(self, control_allocation, context, runtime_params): + total_cost = 0. + if control_allocation is not None: # using "is not None" in case the control allocation is 0. + + base_control_allocation = self.reshape_control_signal(self.controller.parameters.value._get(context)) + + candidate_control_allocation = self.reshape_control_signal(control_allocation) + + # Get reconfiguration cost for candidate control signal + reconfiguration_cost = 0. 
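+            # Illustrative note (hypothetical values): with a base allocation of [1., 1.] and a candidate
+            # of [1., 2.], a Distance-based compute_reconfiguration_cost would contribute 1.0, which is
+            # combined with the ControlSignals' other costs by combine_costs below to yield total_cost.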
+ if callable(self.controller.compute_reconfiguration_cost): + reconfiguration_cost = self.controller.compute_reconfiguration_cost([candidate_control_allocation, + base_control_allocation]) + self.controller.reconfiguration_cost.set(reconfiguration_cost, context) + + # Apply candidate control signal + self.controller._apply_control_allocation(candidate_control_allocation, + context=context, + runtime_params=runtime_params, + ) + + # Get control signal costs + other_costs = self.controller.parameters.costs._get(context) or [] + all_costs = convert_to_np_array(other_costs + [reconfiguration_cost]) + # Compute a total for the candidate control signal(s) + total_cost = self.controller.combine_costs(all_costs) + return total_cost + + # endregion CONTROL + + # ****************************************************************************************************************** + # region ------------------------------------ EXECUTION ------------------------------------------------------------ + # ****************************************************************************************************************** + + @handle_external_context() def evaluate( self, predicted_input=None, control_allocation=None, - num_simulation_trials=None, + num_trials=1, runtime_params=None, base_context=Context(execution_id=None), context=None, @@ -7443,23 +7982,35 @@ def evaluate( return_results=False, block_simulate=False ): - """Runs a simulation of the `Composition`, with the specified control_allocation, excluding its - `controller ` in order to return the - `net_outcome ` of the Composition, according to its - `controller ` under that control_allocation. All values are - reset to pre-simulation values at the end of the simulation. - - If `block_simulate` is set to True, the `controller ` will attempt to use the - entire input set provided to the `run ` method of the `Composition` as input for the - simulated call to `run `. If it is not, the `controller ` will - use the inputs slated for its next or previous execution, depending on whether the `controller_mode` of the - `Composition` is set to `before` or `after`, respectively. - - .. note:: - Block simulation can not be used if the Composition's stimuli were specified as a generator. If - `block_simulate` is set to True and the input type for the Composition was a generator, - block simulation will be disabled for the current execution of `evaluate `. + """Run Composition and compute `net_outcomes ` + + Runs the `Composition` in simulation mode (i.e., excluding its `controller `) + using the **predicted_input** and specified **control_allocation** for each run. The Composition is + run for ***num_trials**. + + If **predicted_input** is not specified, and `block_simulate` is set to True, the `controller + ` attempts to use the entire input set provided to the `run ` + method of the `Composition` as input for the call to `run `. If it is not, the `controller + ` uses the inputs slated for its next or previous execution, depending on whether the + `controller_mode ` of the `Composition` is set to `before` or `after`, + respectively. + + .. note:: + Block simulation can not be used if the Composition's stimuli were specified as a generator. + If `block_simulate` is set to True and the input type for the Composition was a generator, + block simulation will be disabled for the current execution of `evaluate `. + + The `net_outcome ` for each run is calculated using the `controller + `'s ` function. 
Each run is executed + independently, using the same **predicted_inputs** and **control_allocation**, and a randomly and + independently sampled seed for the random number generator. All values are reset to pre-simulation + values at the end of the simulation. + + Returns the `net_outcome ` of a run of the `agent_rep + `. If **return_results** is True, + an array with the results of each run is also returned. """ + # Apply candidate control to signal(s) for the upcoming simulation and determine its cost total_cost = self._get_total_cost_of_control_allocation(control_allocation, context, runtime_params) @@ -7496,32 +8047,46 @@ def evaluate( # Run Composition in "SIMULATION" context context.add_flag(ContextFlags.SIMULATION_MODE) context.remove_flag(ContextFlags.CONTROL) + + # EXECUTE run of composition and aggregate results + # Use reporting options from Report context created in initial (outer) call to run() - with Report(self, context) as report: - results = self.run(inputs=inputs, - context=context, - runtime_params=runtime_params, - num_trials=num_simulation_trials, - animate=animate, - execution_mode=execution_mode, - skip_initialization=True, - ) + with Report(self, context=context) as report: + result = self.run(inputs=inputs, + context=context, + runtime_params=runtime_params, + num_trials=num_trials, + animate=animate, + execution_mode=execution_mode, + skip_initialization=True, + ) context.remove_flag(ContextFlags.SIMULATION_MODE) context.execution_phase = ContextFlags.CONTROL if buffer_animate_state: self._animate = buffer_animate_state + assert result == self.get_output_values(context) + # Store simulation results on "base" composition if self.initialization_status != ContextFlags.INITIALIZING: try: - self.parameters.simulation_results._get(base_context).append( - self.get_output_values(context)) + self.parameters.simulation_results._get(base_context).append(result) except AttributeError: - self.parameters.simulation_results._set([self.get_output_values(context)], base_context) + self.parameters.simulation_results._set([result], base_context) + + # COMPUTE net_outcome and aggregate in net_outcomes # Update input ports in order to get correct value for "outcome" (from objective mech) self.controller._update_input_ports(runtime_params, context) - outcome = self.controller.input_port.parameters.value._get(context) + + # FIX: REFACTOR TO CREATE ARRAY OF INPUT_PORT VALUES FOR OUTCOME_INPUT_PORTS + outcome_is_array = self.controller.num_outcome_input_ports > 1 + if not outcome_is_array: + outcome = self.controller.input_port.parameters.value._get(context) + else: + outcome = [] + for i in range(self.controller.num_outcome_input_ports): + outcome.append(self.controller.parameters.input_ports._get(context)[i].parameters.value._get(context)) if outcome is None: net_outcome = 0.0 @@ -7530,11 +8095,10 @@ def evaluate( net_outcome = self.controller.compute_net_outcome(outcome, total_cost) if return_results: - return net_outcome, results + return net_outcome, result else: return net_outcome - def _infer_target_nodes(self, targets: dict): """ Maps targets onto target mechanisms (as needed by learning) @@ -7679,8 +8243,9 @@ def _parse_list(self, inputs): def _parse_string(self, inputs): """ - Validates that conditions are met to use a string as input, i.e. that there is only one input node and that node's default - input port has a label matching the provided string. If so, convert the string to an input dict and parse + Validate that conditions are met to use a string as input, i.e. 
that there is only one input node and that + node's default input port has a label matching the provided string. If so, convert the string to an input + dict and parse. Returns ------- @@ -7738,6 +8303,7 @@ def _validate_single_input(self, node, input): _input = [] node_variable = [input_port.defaults.value for input_port in node.input_ports if not input_port.internal_only] match_type = self._input_matches_variable(input, node_variable) + # match_type = self._input_matches_variable(input, node_variable) if match_type == 'homogeneous': # np.atleast_2d will catch any single-input ports specified without an outer list _input = convert_to_np_array(input, 2) @@ -7787,8 +8353,8 @@ def _validate_input_shapes(self, inputs): if not input_port.internal_only] err_msg = f"Input stimulus ({incompatible_stimulus}) for {node_name} is incompatible with " \ f"its external_input_values ({node_variable})." - # 8/3/17 CW: I admit the error message implementation here is very hacky; but it's at least not a hack - # for "functionality" but rather a hack for user clarity + # 8/3/17 CW: I admit the error message implementation here is very hacky; + # but it's at least not a hack for "functionality" but rather a hack for user clarity if "KWTA" in str(type(node)): err_msg = err_msg + " For KWTA mechanisms, remember to append an array of zeros " \ "(or other values) to represent the outside stimulus for " \ @@ -7805,8 +8371,8 @@ def _validate_input_shapes(self, inputs): input_lengths.remove(1) if len(input_lengths) > 1: raise CompositionError(f"The input dictionary for {self.name} contains input specifications of different " - f"lengths ({input_lengths}). The same number of inputs must be provided for each node " - f"in a Composition.") + f"lengths ({input_lengths}). The same number of inputs must be provided for each " + f"node in a Composition.") elif len(input_lengths) > 0: num_trials = list(input_lengths)[0] for mechanism in inputs_to_duplicate: @@ -7861,7 +8427,17 @@ def _flatten_nested_dicts(self, inputs): _inputs.update({node:inp}) return _inputs - def _parse_labels(self, inputs, mech=None): + def _parse_names_in_inputs(self, inputs): + names = [] + for key in inputs: + if isinstance(key, str): + names.append(key) + named_nodes = [(node, node.name) for node in self.get_nodes_by_role(NodeRole.INPUT) if node.name in names] + for node, name in named_nodes: + inputs[node] = inputs.pop(name) + return inputs + + def _parse_labels(self, inputs, mech=None, context=None): """ Traverse input dict and replace any inputs that are in the form of their input or output label representations to their numeric representations @@ -7870,14 +8446,16 @@ def _parse_labels(self, inputs, mech=None): ------- `dict` : - The input dict, with inputs in the form of their label representations replaced by their numeric representations + The input dict, with inputs with their label representations replaced by their numeric representations """ - # the nested list comp below is necessary to retrieve target nodes of learning pathways, because the PathwayRole - # enum is not importable into this module - target_to_output = {path.target: path.output for path in self.pathways if 'LEARNING' in [role.name for role in path.roles]} + # the nested list comp below is necessary to retrieve target nodes of learning pathways, + # because the PathwayRole enum is not importable into this module + target_to_output = {path.target: path.output for path in self.pathways + if 'LEARNING' in [role.name for role in path.roles]} if mech: - 
target_nodes_of_learning_pathways = [path.target for path in self.pathways] + target_nodes_of_learning_pathways = [path.target if path.learning_components else None + for path in self.pathways] label_type = INPUT if mech not in target_nodes_of_learning_pathways else OUTPUT label_mech = mech if mech not in target_to_output else target_to_output[mech] labels = label_mech._get_standardized_label_dict(label_type) @@ -7888,7 +8466,16 @@ def _parse_labels(self, inputs, mech=None): (target_to_output[k].output_labels_dict if k in target_to_output else k.input_labels_dict): _inputs.update({k:self._parse_labels(v, k)}) else: - _inputs.update({k:v}) + # Call _parse_labels for any Nodes with input_labels_dicts in nested Composition(s) + if (isinstance(k, Composition) + and any(n.input_labels_dict + for n in k._get_nested_nodes_with_same_roles_at_all_levels(k,NodeRole.INPUT))): + for i, port in enumerate(k.input_CIM.input_ports): + _, mech_with_labels, __ = k.input_CIM._get_destination_node_for_input_port(port) + v[i] = k._parse_labels(inputs[k][i],mech_with_labels) + _inputs.update({k:v}) + else: + _inputs.update({k:v}) elif type(inputs) == list or type(inputs) == np.ndarray: _inputs = [] for i in range(len(inputs)): @@ -7897,12 +8484,19 @@ def _parse_labels(self, inputs, mech=None): if type(stimulus) == list or type(stimulus) == np.ndarray: _inputs.append(self._parse_labels(inputs[i], mech)) elif type(stimulus) == str: - _inputs.append(labels[port][stimulus]) + if not labels: + raise CompositionError(f"Inappropriate use of str ({repr(stimulus)}) as a stimulus for " + f"{mech.name} in {self.name}: it does not have an input_labels_dict.") + try: + _inputs.append(labels[port][stimulus]) + except KeyError as e: + raise CompositionError(f"Inappropriate use of {repr(stimulus)} as a stimulus for {mech.name} " + f"in {self.name}: it is not a label in its input_labels_dict.") else: _inputs.append(stimulus) return _inputs - def _parse_dict(self, inputs): + def _parse_dict(self, inputs, context=None): """ Validates and parses a dict provided as input to a Composition into a standardized form to be used throughout its execution @@ -7916,9 +8510,10 @@ def _parse_dict(self, inputs): Number of input sets in dict for each input node in the Composition """ - # parse a user-provided input dict to format it properly for execution. compute number of input sets and return that - # as well - _inputs = self._parse_labels(inputs) + # parse a user-provided input dict to format it properly for execution. + # compute number of input sets and return that as well + _inputs = self._parse_names_in_inputs(inputs) + _inputs = self._parse_labels(_inputs) _inputs = self._validate_input_dict_node_roles(_inputs) _inputs = self._flatten_nested_dicts(_inputs) _inputs = self._validate_input_shapes(_inputs) @@ -7953,7 +8548,7 @@ def _validate_input_dict_node_roles(self, inputs): inputs[node] = node.default_external_input_values return inputs - def _parse_run_inputs(self, inputs): + def _parse_run_inputs(self, inputs, context=None): """ Takes user-provided input for entire run and parses it according to its type @@ -7983,8 +8578,8 @@ def _parse_run_inputs(self, inputs): raise CompositionError( f"Provided inputs {inputs} is in a disallowed format. Inputs must be provided in the form of " f"a dict, list, function, or generator. " - f"See https://princetonuniversity.github.io/PsyNeuLink/Composition.html#composition-run for details and " - f"formatting instructions for each input type." 
+ f"See https://princetonuniversity.github.io/PsyNeuLink/Composition.html#composition-run " + f"for details and formatting instructions for each input type." ) return _inputs, num_inputs_sets @@ -8084,27 +8679,29 @@ def run( """Pass inputs to Composition, then execute sets of nodes that are eligible to run until termination conditions are met. - See `Composition_Execution` for details of formatting input specifications. + See `Composition_Execution` for details of formatting input specifications.\n + Use `get_input_format ` method to see example of input format.\n Use **animate** to generate a gif of the execution sequence. Arguments --------- - inputs: Dict{`INPUT` `Node ` : list}, function or generator : default None - specifies the inputs to each `INPUT` `Node ` of the Composition in each `TRIAL - ` executed during the run; see `Composition_Execution_Inputs` for additional - information about format. If **inputs** is not specified, the `default_variable - ` for each `INPUT` Node is used as its input on `TRIAL ` + inputs: Dict{`INPUT` `Node ` : list}, function or generator : default None specifies + the inputs to each `INPUT` `Node ` of the Composition in each `TRIAL ` + executed during the run (see `Composition_Execution_Inputs` for additional information about format, and + `get_input_format ` method for generating an example of the input format for + the Composition). If **inputs** is not specified, the `default_variable ` for each + `INPUT` Node is used as its input on `TRIAL `. num_trials : int : default 1 typically, the composition will infer the number of trials from the length of its input specification. - To reuse the same inputs across many trials, you may specify an input dictionary with lists of length 1, + To reuse the same inputs across many trials, an input dictionary can be specified with lists of length 1, or use default inputs, and select a number of trials with num_trials. initialize_cycle_values : Dict { Node: Node Value } : default None sets the value of specified `Nodes ` before the start of the run. All specified Nodes must be in a `cycle ` (i.e., designated with with `NodeRole` `CYCLE - `; otherwise, a warning is issued and the specification is ignored). If a Node in + `; otherwise, a warning is issued and the specification is ignored). If a Node in a cycle is not specified, it is assigned its `default values ` when initialized (see `Composition_Cycles_and_Feedback` additional details). @@ -8194,13 +8791,14 @@ def run( details and `ReportDevices` for options. animate : dict or bool : default False - specifies use of the `show_graph ` method to generate a gif movie showing the - sequence of Components executed in a run (see `example `). - A dict can be specified containing options to pass to the `show_graph ` method; - each key must be a legal argument for the `show_graph ` method, and its value a + specifies use of the `show_graph`show_graph ` method + to generate a gif movie showing the sequence of Components executed in a run + (see `example `). A dict can be specified containing + options to pass to the `show_graph ` method; each key must be a legal + argument for the `show_graph ` method, and its value a specification for that argument. The entries listed below can also be included in the dict to specify parameters of the animation. 
If the **animate** argument is specified simply as `True`, defaults are - used for all arguments of `show_graph ` and the options below: + used for all arguments of `show_graph ` and the options below: * *UNIT*: *EXECUTION_SET* or *COMPONENT* (default=\\ *EXECUTION_SET*\\ ) -- specifies which Components to treat as active in each call to `show_graph `. *COMPONENT* generates an @@ -8280,7 +8878,6 @@ def run( .. figure:: _static/XXX_movie.gif :alt: Animation of Composition in XXX example script - :scale: 50 % This figure shows an animation of the Composition in the XXX example script, with the `show_graph ` **show_control** argument specified as *ALL* and *UNIT* specified as *EXECUTION_SET*: @@ -8289,7 +8886,6 @@ def run( .. figure:: _static/XXX_movie.gif :alt: Animation of Composition in XXX example script - :scale: 150 % COMMENT Returns @@ -8301,23 +8897,11 @@ def run( `\(`NodeRole.OUTPUT `). .. note:: - The `results ` attribute of the Compositon contains a list of the outputs for all + The `results ` attribute of the Composition contains a list of the outputs for all trials. """ context.source = ContextFlags.COMPOSITION - # FIX 5/28/20 - # context.execution_phase = ContextFlags.PREPARING - # context.replace_flag(ContextFlags.IDLE, ContextFlags.PREPARING) - - if scheduler is None: - scheduler = self.scheduler - - if scheduling_mode is not None: - scheduler.mode = scheduling_mode - - if default_absolute_time_unit is not None: - scheduler.default_absolute_time_unit = default_absolute_time_unit for node in self.nodes: num_execs = node.parameters.num_executions._get(context) @@ -8332,15 +8916,29 @@ def run( except: self.parameters.input_specification._set(inputs, context) + # May be used by controller for specifying num_trials_per_simulation + self.num_trials = num_trials + # DS 1/7/20: Check to see if any Components are still in deferred init. If so, attempt to initialize them. # If they can not be initialized, raise a warning. self._complete_init_of_partially_initialized_nodes(context=context) + if ContextFlags.SIMULATION_MODE not in context.runmode: - self._check_projection_initialization_status() + self._check_controller_initialization_status() + self._check_nodes_initialization_status() if not skip_analyze_graph: self._analyze_graph(context=context) + if scheduler is None: + scheduler = self.scheduler + + if scheduling_mode is not None: + scheduler.mode = scheduling_mode + + if default_absolute_time_unit is not None: + scheduler.default_absolute_time_unit = default_absolute_time_unit + self._check_for_unnecessary_feedback_projections() self._check_for_nesting_with_absolute_conditions(scheduler, termination_processing) @@ -8369,7 +8967,7 @@ def run( input_nodes = self.get_nodes_by_role(NodeRole.INPUT) - inputs, num_inputs_sets = self._parse_run_inputs(inputs) + inputs, num_inputs_sets = self._parse_run_inputs(inputs, context) if num_trials is not None: num_trials = num_trials @@ -8420,8 +9018,9 @@ def run( if not valid_reset_type: raise CompositionError( - f"{reset_stateful_functions_when} is not a valid specification for reset_integrator_nodes_when of {self.name}. " - "reset_integrator_nodes_when must be a Condition or a dict comprised of {Node: Condition} pairs.") + f"{reset_stateful_functions_when} is not a valid specification for reset_integrator_nodes_when " + f"of {self.name}. 
reset_integrator_nodes_when must be a Condition or a dict comprised of " + f" {Node: Condition} pairs.") self._reset_stateful_functions_when_cache = {} @@ -8571,8 +9170,8 @@ def run( call_with_pruned_args(call_after_trial, context=context) # IMPLEMENTATION NOTE: - # The AFTER Run controller execution takes place here, because there's no way to tell from within the execute - # method whether or not we are at the last trial of the run. + # The AFTER Run controller execution takes place here, because there's no way to tell from within the + # execute method whether or not we are at the last trial of the run. # The BEFORE Run controller execution takes place in the execute method, # because we can't execute the controller until after setup has occurred for the Input CIM. if (self.controller_mode == AFTER and @@ -8660,7 +9259,7 @@ def learn( 1. For each pair, the key is the and the value is an input, the shape of which must match the Node's default variable. This is identical to the input dict in the `run ` method - (see `Input Dictionary ` for additional details). + (see `Composition_Input_Dictionary` for additional details). 2. A dict with keys 'inputs', 'targets', and 'epochs'. The `inputs` key stores a dict that is the same same structure as input specification (1) of learn. The `targets` and `epochs` keys should contain @@ -8675,9 +9274,10 @@ def learn( ` for additional details concerning the formatting of targets). num_trials : int (default=None) - typically, the composition will infer the number of trials from the length of its input specification. - To reuse the same inputs across many trials, you may specify an input dictionary with lists of length 1, - or use default inputs, and select a number of trials with num_trials. + typically, the Composition infers the number of trials to execute from the length of its input + specification. However, **num_trials** can be used to enforce an exact number of trials to execute; + if it is greater than there are inputs then inputs will be repeated (see `Composition_Execution_Inputs` + for additional information). epochs : int (default=1) specifies the number of training epochs (that is, repetitions of the batched input set) to run with @@ -8824,7 +9424,7 @@ def _execute_controller(self, # Animate controller (before execution) context.execution_phase = ContextFlags.CONTROL - if self._animate != False and SHOW_CONTROLLER in self._animate and self._animate[SHOW_CONTROLLER]: + if self._animate is not False and SHOW_CONTROLLER in self._animate and self._animate[SHOW_CONTROLLER]: self._animate_execution(self.controller, context) context.remove_flag(ContextFlags.CONTROL) @@ -8944,8 +9544,9 @@ def execute( Returns --------- - - output value of the final Mechanism executed in the Composition : various + output_values : List + These are the values of the Composition's output_CIM.output_ports, excluding those the source of which + are from a (potentially nested) Node with NodeRole.PROBE in its enclosing Composition. 
""" with Report(self, @@ -8980,7 +9581,7 @@ def execute( context=context ) - # ASSIGNMENTS ************************************************************************************************** + # ASSIGNMENTS ********************************************************************************************** if not hasattr(self, '_animate'): # These are meant to be assigned in run method; needed here for direct call to execute method @@ -9004,8 +9605,8 @@ def execute( input_nodes = self.get_nodes_by_role(NodeRole.INPUT) - # if execute was called from command line and no inputs were specified, assign default inputs to highest level - # composition (i.e. not on any nested Compositions) + # if execute was called from command line and no inputs were specified, + # assign default inputs to highest level composition (i.e. not on any nested Compositions) if not inputs and not nested and ContextFlags.COMMAND_LINE in context.source: inputs = self._validate_input_dict_node_roles({}) # Skip initialization if possible (for efficiency): @@ -9015,7 +9616,7 @@ def execute( # - its not a simulation) # - or(gym forage env is being used) # (e.g., when run is called externally repeated for the same environment) - # KAM added HACK below "or self.env is None" in order to merge in interactive inputs fix for speed improvement + # KAM added HACK below "or self.env is None" to merge in interactive inputs fix for speed improvement # TBI: Clean way to call _initialize_from_context if context has not changed, BUT composition has changed # for example: # comp.run() @@ -9128,7 +9729,7 @@ def execute( # FIX 5/28/20 context.remove_flag(ContextFlags.PREPARING) - # EXECUTE INPUT CIM ******************************************************************************************** + # EXECUTE INPUT CIM **************************************************************************************** # FIX: 6/12/19 MOVE TO EXECUTE BELOW? # Handles Input CIM and Parameter CIM execution. @@ -9136,13 +9737,13 @@ def execute( # FIX: 8/21/19 # If self is a nested composition, its input CIM will obtain its value in one of two ways, # depending on whether or not it is being executed within a simulation. - # If it is a simulation, then we need to use the _build_variable_for_input_CIM method, which parses the inputs - # argument of the execute method into a suitable shape for the input ports of the input_CIM. + # If it is a simulation, then we need to use the _build_variable_for_input_CIM method, which parses the + # inputs argument of the execute method into a suitable shape for the input ports of the input_CIM. # If it is not a simulation, we can simply execute the input CIM. # - # If self is an unnested composition, we must update the input ports for any input nodes that are Compositions. - # This is done to update the variable for their input CIMs, which allows the _adjust_execution_stimuli - # method to properly validate input for those nodes. + # If self is an unnested composition, we must update the input ports for any input nodes that are + # Compositions. This is done to update the variable for their input CIMs, which allows the + # _adjust_execution_stimuli method to properly validate input for those nodes. 
# -DS context.execution_phase = ContextFlags.PROCESSING @@ -9196,7 +9797,7 @@ def execute( # FIX: END context.remove_flag(ContextFlags.PROCESSING) - # EXECUTE CONTROLLER (if specified for BEFORE) ***************************************************************** + # EXECUTE CONTROLLER (if specified for BEFORE) ************************************************************* # Execute controller -------------------------------------------------------- @@ -9207,18 +9808,17 @@ def execute( # IMPLEMENTATION NOTE: # The BEFORE Run controller execution takes place here, because we can't execute the controller until after # setup has occurred for the Input CIM, whereas the AFTER Run controller execution takes place in the run - # method, because there's no way to tell from within the execute method whether or not we are at the last trial - # of the run. - if (self.controller_time_scale == TimeScale.RUN and - scheduler.get_clock(context).time.trial == 0): - self._execute_controller( - relative_order=BEFORE, - execution_mode=execution_mode, - _comp_ex=_comp_ex, - report=report, - report_num=report_num, - context=context - ) + # method, because there's no way to tell from within the execute method whether or not we are at the last + # trial of the run. + if self.controller_time_scale == TimeScale.RUN and scheduler.get_clock(context).time.trial == 0: + self._execute_controller( + relative_order=BEFORE, + execution_mode=execution_mode, + _comp_ex=_comp_ex, + report=report, + report_num=report_num, + context=context + ) elif self.controller_time_scale == TimeScale.TRIAL: self._execute_controller( relative_order=BEFORE, @@ -9229,7 +9829,7 @@ def execute( context=context ) - # EXECUTE EACH EXECUTION SET ********************************************************************************* + # EXECUTE EACH EXECUTION SET ******************************************************************************* # Begin reporting of TRIAL: # - add TRIAL header and Composition's input to output report (now that they are known) @@ -9278,11 +9878,13 @@ def execute( # SETUP EXECUTION ---------------------------------------------------------------------------- # IMPLEMENTATION NOTE KDM 1/15/20: - # call_*after*_pass is called here because we can't tell at the end of this code block whether a PASS has - # ended or not. The scheduler only modifies the pass after we receive an execution_set. So, we only know a - # PASS has ended in retrospect after the scheduler has changed the clock to indicate it. So, we have to run - # call_after_pass before the next PASS (here) or after this code block (see call to call_after_pass below) - curr_pass = execution_scheduler.get_clock(context).get_total_times_relative(TimeScale.PASS, TimeScale.TRIAL) + # call_*after*_pass is called here because we can't tell at the end of this code block whether a PASS + # has ended or not. The scheduler only modifies the pass after we receive an execution_set. So, we only + # know a PASS has ended in retrospect after the scheduler has changed the clock to indicate it. 
So, we + # have to run call_after_pass before the next PASS (here) or after this code block (see call to + # call_after_pass below) + curr_pass = execution_scheduler.get_clock(context).get_total_times_relative(TimeScale.PASS, + TimeScale.TRIAL) new_pass = False if curr_pass != last_pass: new_pass = True @@ -9340,7 +9942,7 @@ def execute( _comp_ex.freeze_values() # PURGE LEARNING IF NOT ENABLED ---------------------------------------------------------------- - # If learning is turned off, check for any learning related nodes and remove them from the execution set + # If learning is turned off, check for learning related nodes and remove them from the execution set if not self._is_learning(context): next_execution_set = next_execution_set - set(self.get_nodes_by_role(NodeRole.LEARNING)) @@ -9359,7 +9961,7 @@ def execute( context.execution_phase = ContextFlags.PROCESSING self._animate_execution(next_execution_set, context) - # EXECUTE EACH NODE IN EXECUTION SET ---------------------------------------------------------------------- + # EXECUTE EACH NODE IN EXECUTION SET ------------------------------------------------------------------- if execution_scheduler.mode is SchedulingMode.EXACT_TIME: # sort flattened execution set by unflattened position next_execution_set = sorted( @@ -9397,9 +9999,10 @@ def execute( execution_runtime_params = {} if node in runtime_params: - execution_runtime_params.update(self._get_satisfied_runtime_param_values(runtime_params[node], - execution_scheduler, - context)) + execution_runtime_params.update( + self._get_satisfied_runtime_param_values(runtime_params[node], + execution_scheduler, + context)) # (Re)set context.execution_phase to PROCESSING by default context.execution_phase = ContextFlags.PROCESSING @@ -9412,7 +10015,8 @@ def execute( projections = set(self.projections).intersection(set(node.path_afferents)) if any([p for p in projections if any([a for a in p.parameter_ports[MATRIX].mod_afferents - if (hasattr(a, 'learning_enabled') and a.learning_enabled in {True, ONLINE})])]): + if (hasattr(a, 'learning_enabled') + and a.learning_enabled in {True, ONLINE})])]): context.replace_flag(ContextFlags.PROCESSING, ContextFlags.LEARNING) # Execute Mechanism @@ -9519,7 +10123,6 @@ def execute( node.output_ports[i].parameters.value._set(frozen_values[node][i], context, skip_history=True, skip_log=True) - # Set all nodes to new values for node in next_execution_set: for i in range(len(node.output_ports)): @@ -9579,11 +10182,6 @@ def execute( self.output_CIM.execute(context=context) context.execution_phase = ContextFlags.IDLE - # Assign output_values - output_values = [] - for port in self.output_CIM.output_ports: - output_values.append(port.parameters.value._get(context)) - # Animate output_CIM # FIX: NOT SURE WHETHER IT CAN BE LEFT IN PROCESSING AFTER THIS - # COORDINATE WITH REFACTORING OF PROCESSING/CONTROL CONTEXT @@ -9640,9 +10238,12 @@ def execute( execution_scheduler.get_clock(context)._increment_time(TimeScale.TRIAL) - return output_values + return self.get_output_values(context) def __call__(self, *args, **kwargs): + """Execute Composition of any args are provided; else simply return results of last execution. + This allows Composition, after it has been constructed, to be run simply by calling it directly. 
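+        For example (an illustrative sketch; the names `my_comp` and `input_node` are hypothetical),
+        calling `my_comp({input_node: [[1.0]]})` is expected to behave like
+        `my_comp.run(inputs={input_node: [[1.0]]})`, while `my_comp()` with no arguments simply returns
+        `my_comp.results[-1]` if the Composition has already been run.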
+ """ if not args and not kwargs: if self.results: return self.results[-1] @@ -9658,6 +10259,195 @@ def __call__(self, *args, **kwargs): bad_args_str = ", ".join([str(arg) for arg in args] + list(kwargs.keys())) raise CompositionError(f"Composition ({self.name}) called with illegal argument(s): {bad_args_str}") + + def get_inputs_format(self, **kwargs): + return self.get_input_format(**kwargs, alias="get_inputs_format") + + def get_input_format(self, num_trials:int=1, + use_labels:bool=False, + show_nested_input_nodes:bool=False, + alias:str=None): + """Return str with format of dict used by **inputs** argument of `run ` method. + + Arguments + --------- + + num_trials : int : default 1 + specifies number of trials' worth of inputs to included in format. + + use_labels : bool : default False + if True, shows labels instead of values for Mechanisms that have an `input_label_dict + `. For **num_trials** = 1, a representative label is + shown; for **num_trials** > 1, a different label is used for each trial shown, cycling + through the set if **num_trials** is greater than the number of labels. + + show_nested_input_nodes : bool : default False + shows hierarchical display of `Nodes ` in `nested Compositions ` + with names of destination `INPUT ` `Nodes ` and representative inputs, + followed by the actual format used for the `run ` method. + """ + + if alias: + warnings.warn(f"{alias} is aliased to get_input_format(); please use that in the future.") + + def _get_inputs(comp, nesting_level=1, use_labels=False): + + input_format = '' + indent = '\t' * nesting_level + for node in comp.get_nodes_by_role(NodeRole.INPUT): + input_format += '\n' + indent + node.name + ': ' + + # Nested Compositions + if show_nested_input_nodes and isinstance(node, Composition): + trials = _get_inputs(node, nesting_level=nesting_level + 1, use_labels=use_labels) + + # Nested Composition + else: + trials = [] + for t in range(num_trials): + + # Mechanism with labels + if use_labels and isinstance(node, Mechanism) and node.input_labels_dict: + input_values = [] + for i in range(len(node.input_values)): + label_dict = node.input_labels_dict[i] + labels = list(label_dict.keys()) + input_values.append(repr(labels[t % len(labels)])) + trial = f"[{','.join(input_values)}]" + + # Mechanism(s) with labels in nested Compositions + elif (use_labels and isinstance(node, Composition) + and any(n.input_labels_dict for n + in node._get_nested_nodes_with_same_roles_at_all_levels(node, NodeRole.INPUT))): + input_values = [] + for i, port in enumerate(node.input_CIM.input_ports): + _, mech, __ = node.input_CIM._get_destination_node_for_input_port(port) + labels_dict = mech.input_labels_dict + if labels_dict: + labels = list(labels_dict[0].keys()) + input_values.append(repr([labels[t % len(labels)]])) + else: + input_values.append(repr(np.array(mech.input_values).tolist())) + trial = f"[{','.join(input_values)}]" + + # No Mechanism(s) with labels or use_labels == False + else: + trial = f"[{','.join([repr(i.tolist()) for i in node.input_values])}]" + + trials.append(trial) + + trials = ', '.join(trials) + if num_trials > 1: + trials = f"[ {trials} ]" + + input_format += trials + if not show_nested_input_nodes: + input_format += ',' + nesting_level -= 1 + return input_format + + formatted_input = _get_inputs(self, 1, use_labels) + if show_nested_input_nodes: + preface = f"\nInputs to (nested) INPUT Nodes of {self.name} for {num_trials} trials:" + epilog = f"\n\nFormat as follows for inputs to run():\n" \ + 
f"{self.get_input_format(num_trials=num_trials)}" + return preface + formatted_input[:-1] + epilog + return '{' + formatted_input[:-1] + '\n}' + + def get_output_format(self, **kwargs): + return self.get_results_by_nodes(**kwargs, alias="get_output_format") + + def get_result_format(self, **kwargs): + return self.get_results_by_nodes(**kwargs, alias="get_result_format") + + def get_results_format(self, **kwargs): + return self.get_results_by_nodes(**kwargs, alias="get_results_format") + + def get_results_for_node(self, **kwargs): + return self.get_results_by_nodes(**kwargs, alias="get_results_for_node") + + def get_results_for_nodes(self, **kwargs): + return self.get_results_by_nodes(**kwargs, alias="get_results_for_nodes") + + def get_results_by_node(self, **kwargs): + return self.get_results_by_nodes(**kwargs, alias="get_results_by_node") + + def get_results_by_nodes(self, + nodes:Union[Mechanism, list]=None, + use_names:bool=False, + use_labels:bool=False, + alias:str=None): + """Return ordered dict with origin Node and current value of each item in results. + + .. note:: + Items are listed in the order of their values in the Composition's `results ` attribute, + irrespective of the order in which they appear in the **nodes** argument if specified. + + Arguments + --------- + + nodes : List[Mechanism or str], Mechanism or str : default None + specifies `Nodes ` for which to report the value; can be a reference to a Mechanism, + its name, or a list of either or both. If None (the default), the `values ` of + all `OUTPUT ` Nodes are reported. + + use_names : bool : default False + specifies whether to use the names of `Nodes ` rather than references to them as keys. + + use_labels : bool : default False + specifies whether to use labels to report the `values ` of Nodes for `Mechanisms + Mechanism` that have an `output_labels_dict ` attribute. + + Returns + ------- + + Node output_values : Dict[Mechanism:value] + dict , the keys of which are either Mechanisms or the names of them, and values are their + `output_values `. 
+ """ + + if alias: + warnings.warn(f"{alias} is aliased to get_results_by_nodes(); please use that in the future.") + + # Get all OUTPUT Nodes in (nested) Composition(s) + output_nodes = [self.output_CIM._get_source_node_for_output_port(port)[1] + for port in self.output_CIM.output_ports] + + # Get all values for all OUTPUT Nodes + if use_labels: + # Get labels for corresponding values + values = [node.output_labels for node in output_nodes] + else: + values = self.results[-1] or self.output_values + + full_output_set = zip(output_nodes, values) + + nodes = convert_to_list(nodes) + # Translate any Node names to object references + if nodes: + bad_nodes = [] + for i, node in enumerate(nodes.copy()): + if node in output_nodes: + continue + if isinstance(node, str): + nodes[i] = next((n for n in output_nodes if n.name == node),None) + if nodes[i]: + continue + bad_nodes.append(node) + raise CompositionError(f"Nodes specified in get_results_by_nodes() method not found in {self.name} " + f"nor any Compositions nested within it: {bad_nodes}") + + # Use nodes if specified, else all OUTPUT Nodes + nodes = nodes or output_nodes + # Get Nodes and values for ones specified in Nodes (all by default) + result_set = [(n,v) for n, v in full_output_set if n in nodes] + + if use_names: + # Use names of Nodes + return {k.name:np.array(v).tolist() for k,v in result_set} + else: + return {k:np.array(v).tolist() for k,v in result_set} + def _update_learning_parameters(self, context): pass @@ -9789,7 +10579,14 @@ def _build_variable_for_input_CIM(self, inputs): origin_node = origin_node.composition if origin_node in inputs: + # MODIFIED 12/19/21 OLD: value = inputs[origin_node][index] + # # MODIFIED 12/19/21 NEW: + # # MODIFIED 12/19/21 END + # if origin_node.input_labels_dict: + # labels = origin_node.input_labels_dict + # else: + # value = inputs[origin_node][index] else: value = origin_node.defaults.variable[index] @@ -9919,8 +10716,10 @@ def _delete_contexts(self, *contexts, check_simulation_storage=False, visited=No except AttributeError: self.scheduler._delete_counts(c) + # endregion EXECUTION + # ****************************************************************************************************************** - # LLVM + # region -------------------------------------- LLVM --------------------------------------------------------------- # ****************************************************************************************************************** @property @@ -10084,45 +10883,54 @@ def _dict_summary(self): }] } + # endregion LLVM + # ****************************************************************************************************************** - # PROPERTIES + # region ----------------------------------- PROPERTIES ------------------------------------------------------------ # ****************************************************************************************************************** @property def input_ports(self): - """Returns all InputPorts that belong to the Input CompositionInterfaceMechanism""" + """Return all InputPorts that belong to the Input CompositionInterfaceMechanism""" return self.input_CIM.input_ports @property def input_port(self): - """Returns the index 0 InputPort that belongs to the Input CompositionInterfaceMechanism""" + """Return the index 0 InputPort that belongs to the Input CompositionInterfaceMechanism""" return self.input_CIM.input_ports[0] @property def input_values(self): - """Returns values of all InputPorts that belong to the Input 
CompositionInterfaceMechanism""" + """Return values of all InputPorts that belong to the Input CompositionInterfaceMechanism""" return self.get_input_values() def get_input_values(self, context=None): return [input_port.parameters.value.get(context) for input_port in self.input_CIM.input_ports] + @property + def output_port(self): + """Return the index 0 OutputPort that belongs to the Output CompositionInterfaceMechanism""" + return self.output_CIM.output_ports[0] + @property def output_ports(self): - """Returns all OutputPorts that belong to the Output CompositionInterfaceMechanism""" + """Return all OutputPorts that belong to the Output CompositionInterfaceMechanism""" return self.output_CIM.output_ports @property def output_values(self): - """Returns values of all OutputPorts that belong to the Output CompositionInterfaceMechanism in the most recently executed context""" + """Return values of all OutputPorts that belong to the Output CompositionInterfaceMechanism in the most recently executed context""" return self.get_output_values(self.most_recent_context) def get_output_values(self, context=None): - return [output_port.parameters.value.get(context) for output_port in self.output_CIM.output_ports] + return [output_port.parameters.value.get(context) + for output_port in self.output_CIM.output_ports + if (not self.output_CIM._sender_is_probe(output_port) or self.include_probes_in_output)] - # @property - # def mechanisms(self): - # return MechanismList(self, [mech for mech in self.nodes - # if isinstance(mech, Mechanism)]) + @property + def shadowing_dict(self): + """Return dict with shadowing ports as the keys and the ports they shadow as values.""" + return {port:port.shadow_inputs for node in self._all_nodes for port in node.input_ports if port.shadow_inputs} @property def mechanisms(self): @@ -10158,7 +10966,7 @@ def external_input_values(self): @property def default_external_input_values(self): - """Returns the default values of all external InputPorts that belong to the + """Return the default values of all external InputPorts that belong to the Input CompositionInterfaceMechanism """ @@ -10191,11 +10999,6 @@ def stateful_nodes(self): return stateful_nodes - @property - def output_port(self): - """Returns the index 0 OutputPort that belongs to the Output CompositionInterfaceMechanism""" - return self.output_CIM.output_ports[0] - @property def class_parameters(self): return self.__class__.parameters @@ -10204,6 +11007,11 @@ def class_parameters(self): def stateful_parameters(self): return [param for param in self.parameters if param.stateful] + @property + def random_variables(self): + """Return list of Components with seed Parameters (i.e., ones that that call a random function).""" + return [param._owner._owner for param in self.all_dependent_parameters('seed').keys()] + @property def _dependent_components(self): return list(itertools.chain( @@ -10266,6 +11074,12 @@ def _all_nodes(self): if self.controller: yield self.controller + # endregion PROPERTIES + + # ****************************************************************************************************************** + # region ----------------------------------- SHOW_GRAPH ------------------------------------------------------------ + # ****************************************************************************************************************** + def show_graph(self, show_node_structure=False, show_nested=NESTED, @@ -10277,6 +11091,7 @@ def show_graph(self, show_types=False, show_dimensions=False, 
show_projection_labels=False, + show_projections_not_in_composition=False, active_items=None, output_fmt='pdf', context=None): @@ -10290,6 +11105,7 @@ def show_graph(self, show_types=show_types, show_dimensions=show_dimensions, show_projection_labels=show_projection_labels, + show_projections_not_in_composition=show_projections_not_in_composition, active_items=active_items, output_fmt=output_fmt, context=context) @@ -10300,6 +11116,8 @@ def _set_up_animation(self, context): def _animate_execution(self, active_items, context): self._show_graph._animate_execution(active_items, context) + # endregion SHOW_GRAPH + def get_compositions(): """Return list of Compositions in caller's namespace.""" diff --git a/psyneulink/core/compositions/compositionfunctionapproximator.py b/psyneulink/core/compositions/compositionfunctionapproximator.py index 2d3739799d8..208dcbf4ac1 100644 --- a/psyneulink/core/compositions/compositionfunctionapproximator.py +++ b/psyneulink/core/compositions/compositionfunctionapproximator.py @@ -32,8 +32,9 @@ Its `evaluate ` method calls its `function ` to generate and return the predicted `net_outcome ` for a given set of `state_feature_values -`, `control_allocation ` and -`num_estimates ` +`, `control_allocation `, +`num_estimates `, and `num_trials_per_estimate +`. COMMENT: .. note:: @@ -53,6 +54,7 @@ """ from psyneulink.core.compositions.composition import Composition +from psyneulink.core.globals.keywords import COMPOSITION_FUNCTION_APPROXIMATOR from psyneulink.core.globals.context import Context __all__ = ['CompositionFunctionApproximator'] @@ -65,11 +67,11 @@ def __init__(self, error_value): class CompositionFunctionApproximator(Composition): """Subclass of `Composition` that implements a FunctionApproximator as the `agent_rep - ` of an `OptimizationControlmechanism`. + ` of an `OptimizationControlMechanism`. Parameterizes `its function ` to predict a `net_outcome - ` for a set of `state_feature_values ` - and a `control_allocation ` provided by an `OptimizationControlmechanism`. + ` for a set of `state_feature_values ` + and a `control_allocation ` provided by an `OptimizationControlMechanism`. See `Composition ` for additional arguments and attributes. @@ -101,11 +103,17 @@ class CompositionFunctionApproximator(Composition): """ + componentCategory = COMPOSITION_FUNCTION_APPROXIMATOR + def __init__(self, name=None, **param_defaults): # self.function = function super().__init__(name=name, **param_defaults) - def adapt(self, feature_values, control_allocation, net_outcome, context=None): + def adapt(self, + feature_values, + control_allocation, + net_outcome, + context=None): """Adjust parameters of `function ` to improve prediction of `target ` from `input `. """ @@ -113,11 +121,17 @@ def adapt(self, feature_values, control_allocation, net_outcome, context=None): format(CompositionFunctionApproximator.__name__, self.__class__.__name__, repr('adapt'))) - def evaluate(self, feature_values, control_allocation, num_estimates, base_context=Context(execution_id=None), context=None): + def evaluate(self, + feature_values, + control_allocation, + num_estimates, + num_trials_per_estimate, + base_context=Context(execution_id=None), + context=None): """Return `target ` predicted by `function for **input**, using current set of `prediction_parameters `. 
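        As a rough sketch only (nothing below exists in the codebase), a concrete subclass might override `adapt` and `evaluate` along the following lines, with ``prediction_parameters`` and the update rule left abstract::

            class MyFunctionApproximator(CompositionFunctionApproximator):
                # hypothetical subclass used as the agent_rep of an OptimizationControlMechanism
                def adapt(self, feature_values, control_allocation, net_outcome, context=None):
                    # update self.prediction_parameters from the observed net_outcome
                    ...
                def evaluate(self, feature_values, control_allocation, num_estimates,
                             num_trials_per_estimate, base_context=None, context=None):
                    # return the predicted net_outcome for this allocation under the current parameters
                    return self.function(feature_values, control_allocation, context=context)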
""" - # FIX: AUGMENTTO USE num_estimates + # FIX: AUGMENT TO USE num_estimates and num_trials_per_estimate return self.function(feature_values, control_allocation, context=context) @property diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py new file mode 100644 index 00000000000..339fc99cb67 --- /dev/null +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -0,0 +1,603 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + + +# ************************************* ParameterEstimationComposition ************************************************ + +# FIX: SEED FOR noise PARAMETER OF TransferMechanism GETS ASSIGNED TO THE MECHANISM, +# BUT THERE DOES NOT SEEM TO BE A PARAMETER PORT ASSIGNED TO IT FOR THAT +# FIX: ADD Parameters Class DEFINITION FOR ParameterEstimationComposition +# FIX: CHANGE REFERENCES TO <`parameter ` values> AND THE LIKE TO +# <`parameter values `> +# FIX: ADD TESTS: +# - FOR ERRORS IN parameters AND outcome_variables SPECIFICATIONS +# - GENERATES CORRECT SEED ITERATOR, control_signals AND THEIR projections +# - EVENTUALLY, EXECUTION IN BOTH DATA FITTING AND OPTIMIZATION MODES +# FIX: SHOULD PASS ANY ARGS OF RUN METHOD (OTHER THAN num_trial) TO evaluate METHOD OF model COMPOSITION +# NUM_TRIALS?) + +""" + +Contents +-------- + + * `ParameterEstimationComposition_Overview` + * `ParameterEstimationComposition_Data_Fitting` + * `ParameterEstimationComposition_Optimization` + * `ParameterEstimationComposition_Supported_Optimizers` + * `ParameterEstimationComposition_Class_Reference` + + +.. _ParameterEstimationComposition_Overview: + +Overview +-------- + +COMMENT: + ADD MENTION THAT THIS ALLOWS FITTING AND OPTIMIZING "LIKELIHOOD-FREE" MODELS. +COMMENT + +A `ParameterEstimationComposition` is a subclass of `Composition` that is used to estimate specified `parameters +` of a `model ` Composition, +in order to fit the `outputs ` +of the `model ` to a set of data (`ParameterEstimationComposition_Data_Fitting`), +or to optimize its `net_outcome ` according to an `objective_function` +(`ParameterEstimationComposition_Optimization`). In either case, when the ParameterEstimationComposition is `run +` with a given set of `inputs `, it returns the set of +parameter values in its `optimized_parameter_values ` +attribute that it estimates best satisfy either of those conditions, and the results of running the `model +` with those parameters in its `results ` +attribute. The arguments below are the primary ones used to configure a ParameterEstimationComposition for either +`ParameterEstimationComposition_Data_Fitting` or `ParameterEstimationComposition_Optimization`), followed by +sections that describe arguments specific to each. + + .. _ParameterEstimationComposition_Model: + + * **model** - this is a convenience argument that can be used to specify a `Composition` other than the + ParameterEstimationComposition itself as the model. 
Alternatively, the model to be fit can be constructed + within the ParameterEstimationComposition itself, using the **nodes** and/or **pathways** arguments of its + constructor (see `Composition_Constructor` for additional details). The **model** argument + or the **nodes**, **pathways**, and/or **projections** arguments must be specified, but not both. + + .. note:: + Neither the **controller** nor any of its associated arguments can be specified in the constructor for a + ParameterEstimationComposition; this is constructed automatically using the arguments described below. + + * **parameters** - specifies the parameters of the `model ` to be + estimated. These are specified in a dict, in which the key of each entry specifies a parameter to estimate, + and its value either a range of values to sample for that parameter or a distribution from which to draw them. + + * **outcome_variables** - specifies the `OUTPUT` `Nodes ` of the `model + `, the `values ` of which are used + to evaluate the fit of the different combination of parameter values sampled. + + * **num_estimates** - specifies the number of independent samples that are estimated for a given combination of + parameter values. + + +.. _ParameterEstimationComposition_Data_Fitting: + +Data Fitting +------------ + +The ParameterEstimationComposition can be used to find a set of parameters for the `model +` such that, when it is run with a given set of inputs, its results +best match a specified set of empirical data. This requires the following additional arguments to be specified: + + .. _ParameterEstimationComposition_Data: + + * **data** - specifies the data to which the `outcome_variables ` + are fit in the estimation process. They must be in a format that aligns the specification of + `outcome_variables `. + COMMENT: + FIX: GET MORE FROM DAVE HERE + COMMENT + + * **optimization_function** - specifies the function used to compare the `values ` of the + `outcome_variables ` with the **data**, and search over values + of the `parameters ` that maximize the fit. This must be either + `ParameterEstimationFunction` or a subclass of that. By default, ParameterEstimationFunction uses maximum + likelihood estimation (MLE) to compare the `outcome_variables ` + and the data, and + COMMENT: + FIX: GET MORE FROM DAVE HERE + COMMENT + for searching over parameter combinations. + +.. _ParameterEstimationComposition_Optimization: + +Parameter Optimization +---------------------- + + .. _ParameterEstimationComposition_Objective_Function: + + * **objective_function** - specifies a function used to evaluate the `values ` of the + `outcome_variables `, according to which combinations of + `parameters ` are assessed. The shape of the `variable + ` of the `objective_function (i.e., its first positional argument) must be the same as an + array containing the `value ` of the OutputPort corresponding to each item specified in + `outcome_variables `. + + * **optimization_function** - specifies the function used to search over values of the `parameters + ` in order to optimize the **objective_function**. It can be any + `OptimizationFunction` that accepts an `objective_function ` as an argument or specifies + one by default. By default `GridSearch` is used which exhaustively evaluates all combinations of `parameter + ` values, and returns the set that either maximizes or minimizes the + **objective_function**. + +.. _ParameterEstimationComposition_Supported_Optimizers: + +Supported Optimizers +-------------------- + +TBD + +.. 
_ParameterEstimationComposition_Class_Reference: + + Class Reference + --------------- + + """ + + from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import \ + OptimizationControlMechanism + from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism + from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal + from psyneulink.core.compositions.composition import Composition + from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context + from psyneulink.core.globals.keywords import BEFORE + from psyneulink.core.globals.parameters import Parameter + + __all__ = ['ParameterEstimationComposition'] + + COMPOSITION_SPECIFICATION_ARGS = {'nodes', 'pathways', 'projections'} + CONTROLLER_SPECIFICATION_ARGS = {'controller', + 'enable_controller', + 'controller_mode', + 'controller_time_scale', + 'controller_condition', + 'retain_old_simulation_data'} + + + class ParameterEstimationCompositionError(Exception): + def __init__(self, error_value): + self.error_value = error_value + + + def _initial_seed_getter(owning_component, context=None): + try: + return owning_component.controller.parameters.initial_seed._get(context) + except: + return None + + def _initial_seed_setter(value, owning_component, context=None): + owning_component.controller.parameters.initial_seed.set(value, context) + return value + + def _same_seed_for_all_parameter_combinations_getter(owning_component, context=None): + try: + return owning_component.controller.parameters.same_seed_for_all_allocations._get(context) + except: + return None + + def _same_seed_for_all_parameter_combinations_setter(value, owning_component, context=None): + owning_component.controller.parameters.same_seed_for_all_allocations.set(value, context) + return value + + + class ParameterEstimationComposition(Composition): + """ + Composition( \ + parameters, + outcome_variables, + model=None, + data=None, + objective_function=None, + optimization_function=None, + num_estimates=1, + num_trials_per_estimate=None, + initial_seed=None, + same_seed_for_all_parameter_combinations=False + ) + + Subclass of `Composition` that estimates specified parameters either to fit the results of a Composition + to a set of data or to optimize a specified function. + + Automatically implements an `OptimizationControlMechanism` as its `controller `, + which is constructed using arguments to the ParameterEstimationComposition's constructor as described below. + + The following arguments are those specific to ParameterEstimationComposition; see `Composition` for additional + arguments. + + Arguments + --------- + + parameters : dict[Parameter:Union[Iterator, Function, List, value]] + specifies the parameters of the `model ` used for + `ParameterEstimationComposition_Data_Fitting` or `ParameterEstimationComposition_Optimization`, and either + the range of values to be evaluated for each parameter, or priors that define a distribution over those. + + outcome_variables : list[Composition output nodes] + specifies the `OUTPUT` `Nodes ` of the `model `, + the `values ` of which are either compared to a specified **data** when the + ParameterEstimationComposition is used for `ParameterEstimationComposition_Data_Fitting`, or used by the + `optimization_function ` for + `ParameterEstimationComposition_Optimization`.
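    To make the relation between these two arguments concrete, here is a hedged construction sketch for the data-fitting case (the Mechanisms, parameter values, inputs, and data below are hypothetical; ``ParameterEstimationFunction`` is the optimizer referred to under Data Fitting above)::

        pec = ParameterEstimationComposition(
            model=model_comp,                                    # a previously constructed Composition
            parameters={decision.parameters.rate: [0.1, 0.3, 0.5],
                        response.parameters.slope: [0.5, 1.0, 2.0]},
            outcome_variables=[decision, response],              # OUTPUT Nodes of model_comp
            data=empirical_data,                                 # aligned with outcome_variables
            optimization_function=ParameterEstimationFunction,
            num_estimates=100)
        pec.run(inputs=fitting_inputs)
        pec.optimized_parameter_values                           # best-fitting value for each parameter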
+ + model : Composition : default None + specifies an external `Composition` for which parameters are to be `fit to data + ` or `optimized ` + according to a specified `objective_function `. + If **model** is None (the default), the ParameterEstimationComposition itself is used (see + `model ` for additional information). + + data : array : default None + specifies the data to be fit when the ParameterEstimationComposition is used for + `ParameterEstimationComposition_Data_Fitting`; structure must conform to the format of + **outcome_variables** (see `data ` for additional information). + + objective_function : ObjectiveFunction, function or method + specifies the function used to evaluate the `net_outcome ` of the `model + ` when the ParameterEstimationComposition is used for + `ParameterEstimationComposition_Optimization` (see `objective_function + ` for additional information). + + optimization_function : OptimizationFunction, function or method + specifies the function used to evaluate the `fit to data ` + or `optimize ` the parameters of the `model + ` according to a specified `objective_function + `; the shape of the `variable ` of the + `objective_function` (i.e., its first positional argument) must be the same as an array containing the `value + ` of the OutputPort corresponding to each item specified in `outcome_variables + `. + + num_estimates : int : default 1 + specifies the number of estimates made for each combination of `parameter ` + values (see `num_estimates ` for additional information); + it is passed to the ParameterEstimationComposition's `controller ` to set its + `num_estimates ` Parameter. + + num_trials_per_estimate : int : default None + specifies an exact number of trials to execute for each run of the `model + ` when estimating each combination of `parameter + ` values (see `num_trials_per_estimate + ` for additional information). + + initial_seed : int : default None + specifies the seed used to initialize the random number generator at construction; it is passed to the + ParameterEstimationComposition's `controller ` to set its `initial_seed + ` Parameter. + + same_seed_for_all_parameter_combinations : bool : default False + specifies whether the random number generator is re-initialized to the same value when estimating each + combination of `parameter ` values; it is passed to the + ParameterEstimationComposition's `controller ` to set its + `same_seed_for_all_allocations ` Parameter. + + + Attributes + ---------- + + model : Composition + identifies the `Composition` used for `ParameterEstimationComposition_Data_Fitting` or + `ParameterEstimationComposition_Optimization`. If the **model** argument of the + ParameterEstimationComposition's constructor is not specified, `model` returns the + ParameterEstimationComposition itself. + + parameters : list[Parameters] + determines the parameters of the `model ` used for + `ParameterEstimationComposition_Data_Fitting` or `ParameterEstimationComposition_Optimization` + (see `control ` for additional details). + + parameter_ranges_or_priors : List[Union[Iterator, Function, List or Value]] + determines the range of values evaluated for each `parameter `. + These are assigned as the `allocation_samples ` for the `ControlSignal` + assigned to the ParameterEstimationComposition's `OptimizationControlMechanism` corresponding to each of the + specified `parameters `.
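    Conceptually (a sketch based on ``_instantiate_ocm`` below, using a hypothetical parameter), each entry of **parameters** is turned into a `ControlSignal` whose ``allocation_samples`` are the specified range::

        # parameters={drift_rate_param: [0.1, 0.3, 0.5]} is converted, one entry at a time, to:
        ControlSignal(modulates=drift_rate_param, allocation_samples=[0.1, 0.3, 0.5])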
+ + outcome_variables : list[Composition Output Nodes] + determines the `OUTPUT` `Nodes ` of the `model `, + the `values ` of which are either compared to the **data** when the + ParameterEstimationComposition is used for `ParameterEstimationComposition_Data_Fitting`, or evaluated by the + ParameterEstimationComposition's `optimization_function ` + when it is used for `ParameterEstimationComposition_Optimization`. + + data : array + determines the data to be fit by the `model ` when the + ParameterEstimationComposition is used for `ParameterEstimationComposition_Data_Fitting`. + These must be structured in a form that aligns with the specified `outcome_variables + ` (see `data + ` for additional details). The data are passed to the optimizer + used by `optimization_function `. Returns + None if the model is being used for `ParameterEstimationComposition_Optimization`. + + objective_function : ObjectiveFunction, function or method + determines the function used to evaluate the `results ` of the `model + ` under each set of `parameter + ` values when the ParameterEstimationComposition is used for + `ParameterEstimationComposition_Optimization`. It is passed to the ParameterEstimationComposition's + `OptimizationControlMechanism` as the function of its `objective_mechanism + `, that is used to compute the `net_outcome + ` for the `model ` each time it is + `run ` (see `objective_function ` + for additional details). + + optimization_function : OptimizationFunction + determines the function used to estimate the parameters of the `model ` + that either best fit the `data ` when the ParameterEstimationComposition + is used for `ParameterEstimationComposition_Data_Fitting`, or that achieve some maximum or minimum value of + the `optimization_function ` when the + ParameterEstimationComposition is used for `ParameterEstimationComposition_Optimization`. This is assigned as + the `function ` of the ParameterEstimationComposition's + `OptimizationControlMechanism`. + + num_estimates : int + determines the number of estimates of the `net_outcome ` of the `model + ` (i.e., number of calls to its `evaluate ` + method) for a given combination of `parameter ` values (i.e., + `control_allocation `) evaluated. + + num_trials_per_estimate : int or None + imposes an exact number of trials to be executed in each run of `model ` + used to evaluate its `net_outcome ` by a call to its + OptimizationControlMechanism's `evaluate_agent_rep ` method. + If it is None (the default), then either the number of **inputs** or the value specified for **num_trials** in + the ParameterEstimationComposition's `run ` method is used to determine the + number of trials executed (see `number of trials ` for additional + information). + + .. note:: + The **num_trials_per_estimate** is distinct from the **num_trials** argument of the + ParameterEstimationComposition's `run ` method. The latter determines how many full fits + of the `model ` are carried out (that is, how many times the + ParameterEstimationComposition *itself* is run), whereas **num_trials_per_estimate** determines how many + trials are run for a given combination of `parameter ` values + *within* each fit. + + initial_seed : int or None + contains the seed used to initialize the random number generator at construction; it is stored on the + ParameterEstimationComposition's `controller `, and setting it sets the value + of that Parameter (see `initial_seed ` for additional details).
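    A purely illustrative reading of the note above on how these two settings compose::

        num_estimates = 50             # estimates per combination of parameter values
        num_trials_per_estimate = 10   # trials of the model per estimate
        # each parameter combination evaluated by the controller therefore entails
        # 50 * 10 = 500 trials of the model, independently of the num_trials argument
        # passed to the ParameterEstimationComposition's own run() method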
+ + same_seed_for_all_parameter_combinations : bool + contains the setting for determining whether the random number generator used to select seeds for each + estimate of the `model `\\'s `net_outcome + ` is re-initialized to the same value for each combination of `parameter + ` values evaluated. Its values is stored on the + ParameterEstimationComposition's `controller `, and setting it sets the value + of that Parameter (see `same_seed_for_all_allocations + ` for additional details). + + optimized_parameter_values : list + contains the values of the `parameters ` of the `model + ` that best fit the `data ` when + the ParameterEstimationComposition is used for `ParameterEstimationComposition_Data_Fitting`, + or that optimize performance of the `model ` according to the + `optimization_function ` when the + ParameterEstimationComposition is used for `ParameterEstimationComposition_Optimization`. If `parameter values + ` are specified as ranges of values, then + each item of `optimized_parameter_values` is the optimized value of the corresponding `parameter + `. If `parameter values + ` are specified as priors, then each item of + `optimized_parameter_values` is an array containing the values of the corresponding `parameter + ` the distribution of which were determined to be optimal. + + results : list[list[list]] + contains the `output_values ` of the `OUTPUT` `Nodes ` + in the `model ` for every `TRIAL ` executed (see + `Composition.results` for more details). If the ParameterEstimationComposition is used for + `ParameterEstimationComposition_Data_Fitting`, and `parameter values + ` are specified as ranges of values, then + each item of `results ` is an array of `output_values ` + (sampled over `num_estimates `) obtained for the single + optimized combination of `parameter ` values contained in the + corresponding item of `optimized_parameter_values `. + If `parameter values ` are specified as priors, + then each item of `results` is an array of `output_values ` (sampled over + `num_estimates `), each of which corresponds to a combination + of `parameter ` values that were used to generate those results; + it is the *distribution* of those `parameter ` values that were + found to best fit the data. + """ + + class Parameters(Composition.Parameters): + """ + Attributes + ---------- + + initial_seed + see `input_specification ` + + :default value: None + :type: ``int`` + + same_seed_for_all_parameter_combinations + see `input_specification ` + + :default value: False + :type: ``bool`` + + """ + # FIX: 11/32/21 CORRECT INITIAlIZATIONS? 
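        # Note: these two Parameters are proxies -- their getters and setters (defined at module
        # level above) read and write the corresponding Parameters on the ParameterEstimationComposition's
        # controller (its OptimizationControlMechanism), so they return None until a controller
        # has been assigned.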
+ initial_seed = Parameter(None, loggable=False, pnl_internal=True, + getter=_initial_seed_getter, + setter=_initial_seed_setter) + same_seed_for_all_parameter_combinations = Parameter(False, loggable=False, pnl_internal=True, + getter=_same_seed_for_all_parameter_combinations_getter, + setter=_same_seed_for_all_parameter_combinations_setter) + + @handle_external_context() + def __init__(self, + parameters, # OCM control_signals + outcome_variables, # OCM monitor_for_control + optimization_function, # function of OCM + model=None, + data=None, # arg of OCM function + objective_function=None, # function of OCM ObjectiveMechanism + num_estimates=1, # num seeds per parameter combination (i.e., of OCM allocation_samples) + num_trials_per_estimate=None, # num trials per run of model for each combination of parameters + initial_seed=None, + same_seed_for_all_parameter_combinations=None, + name=None, + context=None, + **kwargs): + + self._validate_params(locals()) + + # Assign model + if model: + # If model has been specified, assign as (only) node in PEC, otherwise specification(s) in kwargs are used + # (Note: _validate_params() ensures that either model or nodes and/or pathways are specified, but not both) + kwargs.update({'nodes':model}) + self.model = model or self + + self.optimized_parameter_values = [] + + super().__init__(name=name, + controller_mode=BEFORE, + enable_controller=True, + **kwargs) + + context = Context(source=ContextFlags.COMPOSITION, execution_id=None) + + # Implement OptimizationControlMechanism and assign as PEC controller + # (Note: Implement after Composition itself, so that: + # - Composition's components are all available (limits need for deferred_inits) + # - search for seed params in _instantiate_ocm doesn't include pem itself or its functions) + ocm = self._instantiate_ocm(parameters=parameters, + outcome_variables=outcome_variables, + data=data, + objective_function=objective_function, + optimization_function=optimization_function, + num_estimates=num_estimates, + num_trials_per_estimate=num_trials_per_estimate, + initial_seed=initial_seed, + same_seed_for_all_parameter_combinations=same_seed_for_all_parameter_combinations, + context=context) + + self.add_controller(ocm, context) + + def _validate_params(self, args): + + kwargs = args.pop('kwargs') + pec_name = f"{self.__class__.__name__} '{args.pop('name',None)}'" or f'a {self.__class__.__name__}' + + # FIX: 11/3/21 - WRITE TESTS FOR THESE ERRORS IN test_parameter_estimation_composition.py + + # Must specify either model or a COMPOSITION_SPECIFICATION_ARGS + if not (args['model'] or [arg for arg in kwargs if arg in COMPOSITION_SPECIFICATION_ARGS]): + # if not ((args['model'] or args['nodes']) for arg in kwargs if arg in COMPOSITION_SPECIFICATION_ARGS): + raise ParameterEstimationCompositionError(f"Must specify either 'model' or the " + f"'nodes', 'pathways', and/or `projections` ars " + f"in the constructor for {pec_name}.") + + # Can't specify both model and COMPOSITION_SPECIFICATION_ARGUMENTS + # if (args['model'] and [arg for arg in kwargs if arg in COMPOSITION_SPECIFICATION_ARGS]): + if args['model'] and kwargs.pop('nodes',None): + raise ParameterEstimationCompositionError(f"Can't specify both 'model' and the " + f"'nodes', 'pathways', or 'projections' args " + f"in the constructor for {pec_name}.") + + # Disallow specification of PEC controller args + ctlr_spec_args_found = [arg for arg in CONTROLLER_SPECIFICATION_ARGS if arg in list(kwargs.keys())] + if ctlr_spec_args_found: + plural = 
len(ctlr_spec_args_found) > 1 + raise ParameterEstimationCompositionError(f"Cannot specify the following controller arg" + f"{'s' if plural else ''} for {pec_name}: " + f"'{', '.join(ctlr_spec_args_found)}'; " + f"{'these are' if plural else 'this is'} " + f"set automatically.") + + # Disallow simultaneous specification of + # data (for data fitting; see _ParameterEstimationComposition_Data_Fitting) + # and objective_function (for optimization; see _ParameterEstimationComposition_Optimization) + if args['data'] and args['objective_function']: + raise ParameterEstimationCompositionError(f"Both 'data' and 'objective_function' args were " + f"specified for {pec_name}; must choose one " + f"('data' for fitting or 'objective_function' for optimization).") + + def _instantiate_ocm(self, + parameters, + outcome_variables, + data, + objective_function, + optimization_function, + num_estimates, + num_trials_per_estimate, + initial_seed, + same_seed_for_all_parameter_combinations, + context=None + ): + + # # Parse **parameters** into ControlSignals specs + control_signals = [] + for param, allocation in parameters.items(): + control_signals.append(ControlSignal(modulates=param, + allocation_samples=allocation)) + + # If objective_function has been specified, create and pass ObjectiveMechanism to ocm + objective_mechanism = ObjectiveMechanism(monitor=outcome_variables, + function=objective_function) if objective_function else None + + # FIX: NEED TO BE SURE CONSTRUCTOR FOR MLE optimization_function HAS data ATTRIBUTE + if data: + optimization_function.data = data + + return OptimizationControlMechanism( + agent_rep=self, + monitor_for_control=outcome_variables, + allow_probes=True, + objective_mechanism=objective_mechanism, + function=optimization_function, + control_signals=control_signals, + num_estimates=num_estimates, + num_trials_per_estimate=num_trials_per_estimate, + initial_seed=initial_seed, + same_seed_for_all_allocations=same_seed_for_all_parameter_combinations, + context=context + ) + + # def run(self): + # # FIX: IF DATA WAS SPECIFIED, CHECK THAT INPUTS ARE APPROPRIATE FOR THOSE DATA. + # # FIX: THESE ARE THE PARAMS THAT SHOULD PROBABLY BE PASSED TO THE model COMP FOR ITS RUN: + # # inputs=None, + # # initialize_cycle_values=None, + # # reset_stateful_functions_to=None, + # # reset_stateful_functions_when=Never(), + # # skip_initialization=False, + # # clamp_input=SOFT_CLAMP, + # # runtime_params=None, + # # call_before_time_step=None, + # # call_after_time_step=None, + # # call_before_pass=None, + # # call_after_pass=None, + # # call_before_trial=None, + # # call_after_trial=None, + # # termination_processing=None, + # # scheduler=None, + # # scheduling_mode: typing.Optional[SchedulingMode] = None, + # # execution_mode:pnlvm.ExecutionMode = pnlvm.ExecutionMode.Python, + # # default_absolute_time_unit: typing.Optional[pint.Quantity] = None, + # # FIX: ADD DOCSTRING THAT EXPLAINS HOW TO RUN FOR DATA FITTING VS. OPTIMIZATION + # pass + + # def evaluate(self, + # feature_values, + # control_allocation, + # num_estimates, + # num_trials_per_estimate, + # execution_mode=None, + # base_context=Context(execution_id=None), + # context=None): + # """Return `model ` predicted by `function for + # **input**, using current set of `prediction_parameters `. 
+ # """ + # # FIX: THE FOLLOWING MOSTLY NEEDS TO BE HANDLED BY OptimizationFunction.evaluate_agent_rep AND/OR grid_evaluate + # # FIX: THIS NEEDS TO BE A DEQUE THAT TRACKS ALL THE CONTROL_SIGNAL VALUES OVER num_estimates FOR PARAM DISTRIB + # # FIX: AUGMENT TO USE num_estimates and num_trials_per_estimate + # # FIX: AUGMENT TO USE same_seed_for_all_parameter_combinations PARAMETER + # return self.function(feature_values, control_allocation, context=context) diff --git a/psyneulink/core/compositions/pathway.py b/psyneulink/core/compositions/pathway.py index 0d1f79303c8..951385a36bc 100644 --- a/psyneulink/core/compositions/pathway.py +++ b/psyneulink/core/compositions/pathway.py @@ -101,11 +101,12 @@ .. .. _Pathway_Specification_List: - * **list**: [`Node `, <`Projection `,> `Node `...] -- + * **list**: [`Node `, <`Projection(s) `,> `Node `...] -- each item of the list must be a `Node ` -- i.e., Mechanism or Composition, or a (`Mechanism `, `NodeRoles `) tuple -- or, optionally, a `Projection specification - ` or a (`Projection specification `, `feedback specification - `) tuple interposed between a pair of nodes. + `, a (`Projection specification `, `feedback specification + `) tuple, or a set of either interposed between a pair of nodes (see + `add_linear_processing_pathway ` for additional details). The list must begin and end with a node. .. * **2-item tuple**: (Pathway, `LearningFunction`) -- used to specify a `learning Pathway @@ -153,7 +154,7 @@ the **pathway** arg of its constructor; that is, depending upon how it was specified, it may or may not contain fully constructed `Components `. This is passed to the **pathways** argument of a Composition's constructor or one of its `pathway addition methods ` when the Pathway is used - in the specifiation of any of these. In contrast, when a Pathway is created by a Composition (and assigned to its + in the specification of any of these. In contrast, when a Pathway is created by a Composition (and assigned to its `pathways ` attribute), then the actual `Mechanism(s) ` and/or `Composition(s)` that comprise `Nodes `, and the `Projection(s) ` between them, are listed in the Pathway's `pathway ` attribute. @@ -162,7 +163,7 @@ or None if it is a ``template ` (i.e., was constructed on its own). * `roles ` and `Node ` attributes - if the Pathway was created by a Composition, - the `roles ` attribute `this lists the `PathwayRoles ` assigned to it by the Compositon + the `roles ` attribute `this lists the `PathwayRoles ` assigned to it by the Composition that correspond to the `NodeRoles of its Components, and the `Nodes ` with each of those `NodeRoles ` is assigned to a corresponding attribute on the Pathway. If the Pathway does not belong to a Composition (i.e., it is a `template `), then these attributes return None. 
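A minimal sketch of the list form described above (the Mechanism names are hypothetical; see `add_linear_processing_pathway ` for the full semantics of Projection and feedback specifications)::

    from psyneulink import Composition, ProcessingMechanism, MappingProjection
    A = ProcessingMechanism(name='A')
    B = ProcessingMechanism(name='B')
    C = ProcessingMechanism(name='C')
    # Node, Projection spec, Node, (Projection spec, feedback spec) tuple, Node
    comp = Composition(pathways=[[A, MappingProjection(), B, (MappingProjection(), True), C]])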
@@ -188,17 +189,16 @@ """ import warnings from enum import Enum + import typecheck as tc -from psyneulink.core.components.functions.nonstateful.learningfunctions import LearningFunction from psyneulink.core.components.shellclasses import Mechanism -from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.compositions.composition import Composition, CompositionError, NodeRole +from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ ANY, CONTEXT, FEEDBACK, MAYBE, NODE, LEARNING_FUNCTION, OBJECTIVE_MECHANISM, PROJECTION, TARGET_MECHANISM from psyneulink.core.globals.registry import register_category - __all__ = [ 'Pathway', 'PathwayRegistry', 'PathwayRole' ] @@ -224,7 +224,9 @@ def _is_pathway_entry_spec(entry, desired_type:tc.enum(NODE, PROJECTION, ANY)): is_proj = (_is_projection_spec(entry) or (isinstance(entry, tuple) and _is_projection_spec(entry[0]) - and entry[1] in {True, FEEDBACK, False, MAYBE})) + and entry[1] in {True, FEEDBACK, False, MAYBE}) + or (isinstance(entry, (set,list)) + and all(_is_projection_spec(item) for item in entry))) if is_node or is_proj: return True @@ -297,7 +299,7 @@ class Pathway(object): --------- pathway : list[`Node `, <`Projection `,> `Node `...] - specifies list of `Nodes ` and intercolated `Projections ` to be + specifies list of `Nodes ` and intercolated `Projections ` to be created for the Pathway. name : str : default see `name ` diff --git a/psyneulink/core/compositions/report.py b/psyneulink/core/compositions/report.py index 87a6a8de120..c4d42de68c2 100644 --- a/psyneulink/core/compositions/report.py +++ b/psyneulink/core/compositions/report.py @@ -26,10 +26,10 @@ reporting is generated as execution of each Component occurs; if `FULL ` is used, then the information is reported at the end of each `TRIAL ` executed. This always includes the input and output to a `Mechanism` or a `Composition` and its `Nodes `, and can also include the values -of their `Parameters`, depending on the specification of the **report_params** argument (using `ReportParams` options` +of their `Parameters`, depending on the specification of the **report_params** argument (using `ReportParams` options and/or the `reportOutputPref ` settings of individual Mechanisms). The output for a `nested Composition ` is indented relative to the output for the Composition within which -it is nested. Whether `simulations ` executed by a Composition's ` +it is nested. Whether `simulations ` executed by a Composition's `controller ` are reported is determined by the **report_simulations** argument, using a `ReportSimulations` option and, if displayed, is indented relative to the `controller ` that executed the simulations. Output is reported to the devices specified in the **report_to_devices** argument @@ -63,7 +63,7 @@ ----------- Output and progress reporting can include execution in `simulations ` -of a Composition's `controller `), by specifying a `ReportSimulation` option in the +of a Composition's `controller `), by specifying a `ReportSimulations` option in the **report_simulations** argument of a Composition's `run ` or `learn ` methods. .. 
_Report_To_Device: @@ -296,8 +296,10 @@ class ReportParams(Enum): """ Options used in the **report_params** argument of a `Composition`\'s `execution methods `, to specify the scope of reporting for values of it `Parameters` - and those of its `Nodes ` (see `Reporting Parameter values ` under - `Report_Output` for additional details). + and those of its `Nodes ` (see `Report_Output` for additional details). + COMMENT: + (see `Reporting Parameter values ` under `Report_Output` for additional details). + COMMENT .. technical_note:: Use of these options is expected in the **report_output** constructor for the `Report` object, @@ -312,16 +314,14 @@ class ReportParams(Enum): USE_PREFS defers to `reportOutputPref ` settings of individual Components. - MODULATED (aka CONTROLLED) - report all `Parameters` that are being `modulated ` (i.e., controlled) by a - `ControlMechanism` within the `Composition` (that is, those for which the corresponding `ParameterPort` - receives a `ControlProjection` from a `ControlSignal`. - CONTROLLED (aka MODULATED) report all `Parameters` that are being controlled (i.e., `modulated `) by a `ControlMechanism` within the `Composition` (that is, those for which the corresponding `ParameterPort` receives a `ControlProjection` from a `ControlSignal`). + MODULATED (aka CONTROLLED) + this is identical to `ReportParams.CONTROLLED`. + MONITORED report the `value ` of any `Mechanism` that is being `monitored ` by a `ControlMechanism` or `ObjectiveMechanism`. @@ -342,6 +342,7 @@ class ReportParams(Enum): LOGGED = auto() ALL = auto() + MODULATED = ReportParams.MODULATED CONTROLLED = ReportParams.CONTROLLED MONITORED = ReportParams.MONITORED @@ -468,6 +469,7 @@ class ReportDevices(Flag): DIVERT = auto() PNL_VIEW = auto() + CONSOLE = ReportDevices.CONSOLE RECORD = ReportDevices.RECORD DIVERT = ReportDevices.DIVERT @@ -616,7 +618,7 @@ class Report: _outermost_comp : Composition the Composition that instantiated the Report in the outermost context of execution, and on which output and progress reports are stored by `_print_and_record_reports ` - in the Compositon's `rich_diverted_reports ` and `recorded_reports + in the Composition's `rich_diverted_reports ` and `recorded_reports ` attributes if the `rich_divert ` and/or `record_reports ` are set, respectively. @@ -997,7 +999,7 @@ def report_output(self, in the output report. node : Composition or Mechanism : default None - specifies `node ` for which output is being reported. + specifies `Node ` for which output is being reported. """ if self._report_output is ReportOutput.OFF: @@ -1261,8 +1263,11 @@ def report_output(self, # Only deal with ReportOutput.FULL; ReportOutput.TERSE is handled above under content='controller_start' if report_output in {ReportOutput.FULL}: - - features = [p.parameters.value.get(context).tolist() for p in node.input_ports if p.name != OUTCOME] + try: + features = [p.parameters.value.get(context).tolist() for p in node.input_ports if p.name != OUTCOME] + except AttributeError: + features = [np.array(p.parameters.value.get(context)).tolist() + for p in node.input_ports if p.name != OUTCOME] outcome = node.input_ports[OUTCOME].parameters.value.get(context).tolist() control_allocation = [r.tolist() for r in node.control_allocation] @@ -1323,7 +1328,7 @@ def node_execution_report(self, is_controller=False ) -> Panel: """ - Generates formatted output report for the `node ` of a `Composition` or a `Mechanism`. + Generates formatted output report for the `Node ` of a `Composition` or a `Mechanism`. 
Called by `report_output ` for execution of a Composition, and directly by the `execute ` method of a `Mechanism` when executed on its own. @@ -1342,7 +1347,7 @@ def node_execution_report(self, if it is not specified, it is resolved by calling the node's get_input_values() method. params : 'params' or 'parameters' : default None - specifies whether to report the values of the `Parameters` of the `node ` being executed + specifies whether to report the values of the `Parameters` of the `Node ` being executed together with its input and output. output_val : 2d array : default None @@ -1679,7 +1684,7 @@ def is_logged(node, name): # Don't indent for nodes inside Panels (except Composition.controller, which is never in a Panel) if report_output is ReportOutput.FULL and not is_controller: - depth_indent = 0 + depth_indent = 0 return Padding.indent(report, depth_indent) @@ -1727,7 +1732,7 @@ def report_progress(self, # If was simulating previously, then have just exited, so: # (note: need to use transition and not explicit count of simulations, # since number of simulation trials being run is generally not known) - # - turn it off + # - turn it off self.output_reports[caller][SIMULATING] = False # Update progress report @@ -1807,15 +1812,15 @@ def _print_and_record_reports(self, report_type:str, output_report:OutputReport= self._rich_progress.console.print('') # Record output reports as they are created if len(self._execution_stack)==0 and self._report_output is not ReportOutput.OFF: - if self._rich_divert: - self._rich_diverted_reports += (f'\n{self._rich_progress.console.file.getvalue()}') - if self._record_reports: - with self._recording_console.capture() as capture: - if report_type is EXECUTE_REPORT: - self._recording_console.print(output_report.trial_report) - elif report_type is RUN_REPORT: - self._recording_console.print(output_report.run_report) - self._recorded_reports += capture.get() + if self._rich_divert: + self._rich_diverted_reports += (f'\n{self._rich_progress.console.file.getvalue()}') + if self._record_reports: + with self._recording_console.capture() as capture: + if report_type == EXECUTE_REPORT: + self._recording_console.print(output_report.trial_report) + elif report_type == RUN_REPORT: + self._recording_console.print(output_report.run_report) + self._recorded_reports += capture.get() # Record progress after execution of outer-most Composition if (self._report_output is not ReportOutput.OFF diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index ce106421cf9..7d3d1b208df 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -42,8 +42,8 @@ `learning compnents `. These are listed as the arguments for the show_graph ` method below. -*Display attributes* -- state_features (such as the colors and shapes) in which different types of nodes are displayed can -be modified by assigning a dictionary of attribute:values pairs to the **show_graph_configuration** argument of the +*Display attributes* -- state_features (such as the colors and shapes) in which different types of nodes are displayed +can be modified by assigning a dictionary of attribute:values pairs to the **show_graph_configuration** argument of the Composition's constructor. These are listed as the arguments for the ShowGraph object (used to display the graph) in the `class reference ` below. @@ -147,7 +147,6 @@ .. 
figure:: _static/Composition_show_graph_options_fig.svg :alt: Composition graph examples - :scale: 150 % Displays of the Composition in the `example above `, generated using various options of its `show_graph ` method. **Panel A** shows the graph with its Projections labeled @@ -216,9 +215,10 @@ PROJECTION, PROJECTIONS, ROLES, SIMULATIONS, VALUES from psyneulink.core.globals.utilities import convert_to_list -__all__ = ['DURATION', 'EXECUTION_SET', 'INITIAL_FRAME', 'MOVIE_DIR', 'MOVIE_NAME', - 'MECH_FUNCTION_PARAMS', 'NUM_TRIALS', 'NUM_RUNS', 'PORT_FUNCTION_PARAMS', - 'SAVE_IMAGES', 'SHOW', 'SHOW_CIM', 'SHOW_CONTROLLER', 'SHOW_LEARNING', 'ShowGraph', 'UNIT',] +__all__ = ['DURATION', 'EXECUTION_SET', 'INITIAL_FRAME', 'MOVIE_DIR', 'MOVIE_NAME', 'MECH_FUNCTION_PARAMS', + 'NUM_TRIALS', 'NUM_RUNS', 'PORT_FUNCTION_PARAMS', 'SAVE_IMAGES', + 'SHOW', 'SHOW_CIM', 'SHOW_CONTROLLER', 'SHOW_LEARNING', 'SHOW_PROJECTIONS_NOT_IN_COMPOSITION', + 'ShowGraph', 'UNIT',] # Arguments passed to each nested Composition @@ -237,6 +237,7 @@ SHOW_TYPES = 'show_types' SHOW_DIMENSIONS = 'show_dimensions' SHOW_PROJECTION_LABELS = 'show_projection_labels' +SHOW_PROJECTIONS_NOT_IN_COMPOSITION = 'show_projections_not_in_composition' ACTIVE_ITEMS = 'active_items' OUTPUT_FMT = 'output_fmt' @@ -353,6 +354,10 @@ class ShowGraph(): when **show_nested** is specified as False or a `Composition is nested ` below the level specified in a call to `show_graph `. + inactive_projection_color : keyword : default 'red' + specifies the color in which `Projections ` not active within the `Composition` are displayed, + when the `show_projections_not_in_composition ` option is True. + default_width : int : default 1 specifies the width to use for the outline of nodes and the body of Projection arrows. @@ -401,6 +406,7 @@ def __init__(self, controller_color='purple', learning_color='orange', composition_color='pink', + inactive_projection_color='red', # Lines: default_width = 1, active_thicker_by = 2, @@ -438,6 +444,7 @@ def __init__(self, self.controller_color =controller_color self.learning_color =learning_color self.composition_color =composition_color + self.inactive_projection_color =inactive_projection_color # Lines: self.default_projection_arrow = default_projection_arrow self.default_width = default_width @@ -463,24 +470,26 @@ def show_graph(self, show_types:bool=False, show_dimensions:bool=False, show_projection_labels:bool=False, + show_projections_not_in_composition=False, active_items=None, output_fmt:tc.optional(tc.enum('pdf','gv','jupyter','gif'))='pdf', context=None, **kwargs): """ - show_graph( \ - show_node_structure=False, \ - show_nested=NESTED, \ - show_nested_args=ALL, \ - show_cim=False, \ - show_controller=True, \ - show_learning=False, \ - show_headers=True, \ - show_types=False, \ - show_dimensions=False, \ - show_projection_labels=False, \ - active_items=None, \ - output_fmt='pdf', \ + show_graph( \ + show_node_structure=False, \ + show_nested=NESTED, \ + show_nested_args=ALL, \ + show_cim=False, \ + show_controller=True, \ + show_learning=False, \ + show_headers=True, \ + show_types=False, \ + show_dimensions=False, \ + show_projection_labels=False, \ + show_projections_not_in_composition=False \ + active_items=None, \ + output_fmt='pdf', \ context=None) Show graphical display of Components in a Composition's graph. @@ -563,6 +572,10 @@ def show_graph(self, show_projection_labels : bool : default False specifies whether or not to show names of projections. 
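    For instance (an illustrative call only, assuming ``comp`` is an existing Composition), this labeling option can be combined with the new debugging option described just below::

        comp.show_graph(show_node_structure=True,
                        show_projection_labels=True,
                        show_projections_not_in_composition=True)   # inactive Projections are shown in red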
+ show_projections_not_in_composition : bool : default False + specifies whether or not to show `Projections ` that are not active in the current + `Composition`; these will display in red. This option is for use in debugging. + show_headers : bool : default True specifies whether or not to show headers in the subfields of a Mechanism's node; only takes effect if **show_node_structure** is specified (see above). @@ -751,11 +764,9 @@ def show_graph(self, rcvrs = list(processing_graph.keys()) for rcvr in rcvrs: - # # MODIFIED 6/13/20 NEW: if any(n is rcvr for nested_comp in composition.nodes if isinstance(nested_comp, Composition) for n in nested_comp.nodes): continue - # # MODIFIED 6/13/20 END # If show_controller is true, objective mechanism will be # handled in _assign_controller_components @@ -780,6 +791,7 @@ def show_graph(self, show_types, show_dimensions, show_projection_labels, + show_projections_not_in_composition, nested_args) # Add cim Components to graph if show_cim @@ -793,6 +805,7 @@ def show_graph(self, show_node_structure, node_struct_args, show_projection_labels, + show_projections_not_in_composition, show_controller, comp_hierarchy) @@ -809,6 +822,7 @@ def show_graph(self, show_node_structure, node_struct_args, show_projection_labels, + show_projections_not_in_composition, comp_hierarchy, nesting_level) @@ -827,7 +841,8 @@ def show_graph(self, show_dimensions, show_node_structure, node_struct_args, - show_projection_labels) + show_projection_labels, + show_projections_not_in_composition) return self._generate_output(G, enclosing_comp, @@ -855,6 +870,7 @@ def _assign_processing_components(self, show_types, show_dimensions, show_projection_labels, + show_projections_not_in_composition, nested_args): """Assign nodes to graph""" @@ -897,7 +913,7 @@ def _assign_processing_components(self, # (node: this allows TARGET node for learning to remain marked as an INPUT node) if (NodeRole.LEARNING in composition.nodes_to_roles[rcvr]): # MODIFIED 6/13/20 OLD: FIX - MODIFIED TO ALLOW TARGET TO BE MARKED AS INPUT - # and not NodeRole.INPUT in composition.nodes_to_roles[rcvr]): + # and not NodeRole.INPUT in composition.nodes_to_roles[rcvr]): # MODIFIED 6/13/20 END return @@ -976,7 +992,7 @@ def _assign_processing_components(self, rcvr_penwidth = str(self.bold_width) rcvr_rank = self.output_rank - # OUTPUT Node + # CONTROL Node elif isinstance(rcvr, ControlMechanism): if rcvr in active_items: if self.active_color == BOLD: @@ -990,7 +1006,7 @@ def _assign_processing_components(self, rcvr_penwidth = str(self.bold_width) rcvr_rank = self.output_rank - # Composition that is neither an INPUT Node nor an OUTPUT Node + # Composition that is neither INPUT, OUTPUT or CONTROL Node elif isinstance(rcvr, Composition) and show_nested is not NESTED: if rcvr in active_items: if self.active_color == BOLD: @@ -1053,6 +1069,7 @@ def _assign_processing_components(self, show_dimensions, show_node_structure, show_projection_labels, + show_projections_not_in_composition, enclosing_comp=enclosing_comp, comp_hierarchy=comp_hierarchy, nesting_level=nesting_level) @@ -1067,6 +1084,7 @@ def _assign_cim_components(self, show_node_structure, node_struct_args, show_projection_labels, + show_projections_not_in_composition, show_controller, comp_hierarchy): @@ -1116,6 +1134,8 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, # But if any Projection to it is from a controller, use controller_color for input_port in cim.input_ports: for proj in input_port.path_afferents: + if proj not in enclosing_comp.projections 
and not show_projections_not_in_composition: + continue if self._trace_senders_for_controller(proj, enclosing_comp): cim_type_color = self.controller_color elif cim is composition.output_CIM: @@ -1170,6 +1190,13 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, projs = input_port.path_afferents for proj in projs: + proj_color=self.default_node_color + if proj not in enclosing_comp.projections: + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color + # Get label for Node that sends the input (sndr_label) sndr_node_output_port = proj.sender # Skip if sender is a CIM (handled by enclosing Composition's call to this method) @@ -1204,13 +1231,21 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, sndr_output_node_proj_label = sndr_label # Render Projection - _render_projection(enclosing_g, proj, sndr_output_node_proj_label, rcvr_cim_proj_label) + _render_projection(enclosing_g, proj, sndr_output_node_proj_label, rcvr_cim_proj_label, + proj_color) # Projections from input_CIM to INPUT nodes for output_port in composition.input_CIM.output_ports: projs = output_port.efferents for proj in projs: + proj_color = self.default_node_color + if proj not in composition.projections: + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color + # Get label for Node that receives the input (rcvr_label) rcvr_input_node_proj = proj.receiver if (isinstance(rcvr_input_node_proj.owner, CompositionInterfaceMechanism) @@ -1256,7 +1291,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, rcvr_input_node_proj_label = rcvr_label # Render Projection - _render_projection(g, proj, sndr_input_cim_proj_label, rcvr_input_node_proj_label) + _render_projection(g, proj, sndr_input_cim_proj_label, rcvr_input_node_proj_label, proj_color) # PARAMETER_CIM ------------------------------------------------------------------------- @@ -1268,6 +1303,13 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, projs = input_port.path_afferents for proj in projs: + proj_color = self.control_color + if proj not in enclosing_comp.projections: + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color + # Get label for Node that sends the ControlProjection (sndr label) ctl_mech_output_port = proj.sender # Skip if sender is cim (handled by enclosing Composition's call to this method) @@ -1303,14 +1345,23 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, rcvr_param_cim_proj_label = cim_label # Render Projection - _render_projection(enclosing_g, proj, sndr_ctl_sig_proj_label, rcvr_param_cim_proj_label, - self.control_color) + _render_projection(enclosing_g, proj, + sndr_ctl_sig_proj_label, + rcvr_param_cim_proj_label, + proj_color) # Projections from parameter_CIM to Nodes that are being modulated for output_port in composition.parameter_CIM.output_ports: projs = output_port.efferents for proj in projs: + proj_color = None + if proj not in composition.projections: + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color + # Get label for Node that receives modulation (modulated_mech_label) rcvr_modulated_mech_proj = proj.receiver if (isinstance(rcvr_modulated_mech_proj.owner, CompositionInterfaceMechanism) @@ -1348,9 +1399,9 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, # Render Projection if self._trace_senders_for_controller(proj, enclosing_comp): - ctl_proj_color = self.controller_color 
+ ctl_proj_color = proj_color or self.controller_color else: - ctl_proj_color = self.control_color + ctl_proj_color = proj_color or self.control_color arrowhead = self.default_projection_arrow if isinstance(proj, MappingProjection) else self.control_projection_arrow @@ -1367,6 +1418,13 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, projs = input_port.path_afferents for proj in projs: + proj_color = self.default_node_color + if proj not in composition.projections: + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color + sndr_output_node_proj = proj.sender if (isinstance(sndr_output_node_proj.owner, CompositionInterfaceMechanism) and show_nested is not NESTED): @@ -1374,8 +1432,10 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, else: sndr_output_node_proj_owner = sndr_output_node_proj.owner # Validate the Projection is from an OUTPUT node + # or a PROBE node if allow_probes is set for a controller or its objective_mechanism if ((sndr_output_node_proj_owner in composition.nodes_to_roles and - NodeRole.OUTPUT not in composition.nodes_to_roles[sndr_output_node_proj_owner])): + not any(role for role in {NodeRole.OUTPUT, NodeRole.PROBE} if + role in composition.nodes_to_roles[sndr_output_node_proj_owner]))): raise ShowGraphError(f"Projection to output_CIM of {composition.name} " f"from node {sndr_output_node_proj_owner} that is not " f"an {NodeRole.OUTPUT} node.") @@ -1406,27 +1466,46 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, # FIX 6/23/20 PROBLEM POINT: # Render Projection - _render_projection(g, proj, sndr_output_node_proj_label, rcvr_output_cim_proj_label) + _render_projection(g, + proj, + sndr_output_node_proj_label, + rcvr_output_cim_proj_label, + proj_color) # Projections from output_CIM to Node(s) in enclosing Composition for output_port in composition.output_CIM.output_ports: projs = output_port.efferents for proj in projs: + + proj_color = self.default_node_color + if proj not in enclosing_comp.projections: + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color + rcvr_node_input_port = proj.receiver + + # Skip if receiver is controller of enclosing_comp (handled by _assign_controller_components) + if rcvr_node_input_port.owner is enclosing_comp.controller: + continue + # Skip if receiver is cim (handled by enclosing Composition's call to this method) - if isinstance(rcvr_node_input_port.owner, CompositionInterfaceMechanism): + if (isinstance(rcvr_node_input_port.owner, CompositionInterfaceMechanism) and + rcvr_node_input_port.owner.composition is enclosing_comp): continue + # Skip if there is no inner Composition (show_nested!=NESTED) or # or Projections across nested Compositions are not being shown (show_nested=INSET) if not enclosing_g or show_nested is INSET: continue + # Skip if show_controller and the receiver is objective mechanism if show_controller and enclosing_comp.controller \ and getattr(enclosing_comp.controller, 'objective_mechanism', None) \ is rcvr_node_input_port.owner: continue - # Skip if show_controller and the receiver is objective mechanism rcvr_node_input_port_owner = rcvr_node_input_port.owner rcvr_label = self._get_graph_node_label(composition, @@ -1454,7 +1533,11 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, sndr_output_cim_proj_label = cim_label # Render Projection - _render_projection(enclosing_g, proj, sndr_output_cim_proj_label, rcvr_input_node_proj_label) + _render_projection(enclosing_g, + 
proj, + sndr_output_cim_proj_label, + rcvr_input_node_proj_label, + proj_color) def _assign_controller_components(self, @@ -1469,6 +1552,7 @@ def _assign_controller_components(self, show_node_structure, node_struct_args, show_projection_labels, + show_projections_not_in_composition, comp_hierarchy, nesting_level): """Assign control nodes and edges to graph""" @@ -1628,7 +1712,8 @@ def find_rcvr_comp(r, c, l): arrowhead=ctl_proj_arrowhead ) - # If controller has objective_mechanism, assign its node and Projections + # If controller has objective_mechanism, assign its node and Projections, + # including one from ObjectiveMechanism to controller if controller.objective_mechanism: # get projection from ObjectiveMechanism to ControlMechanism objmech_ctlr_proj = controller.input_port.path_afferents[0] @@ -1735,6 +1820,56 @@ def find_rcvr_comp(r, c, l): g.edge(sndr_proj_label, objmech_proj_label, label=edge_label, color=proj_color, penwidth=proj_width) + # If controller has no objective_mechanism but does have outcome_input_ports, add Projections from them + elif controller.num_outcome_input_ports: + # incoming edges (from monitored mechs directly to controller) + for outcome_input_port in controller.outcome_input_ports: + for projection in outcome_input_port.path_afferents: + if controller in active_items: + if self.active_color == BOLD: + proj_color = self.controller_color + else: + proj_color = self.active_color + proj_width = str(self.default_width + self.active_thicker_by) + composition.active_item_rendered = True + else: + proj_color = self.controller_color + proj_width = str(self.default_width) + if show_node_structure: + sndr_proj_label = self._get_graph_node_label(composition, + projection.sender.owner, + show_types, + show_dimensions) + if (projection.sender.owner not in composition.nodes + and not controller.allow_probes): + num_nesting_levels = self.num_nesting_levels or 0 + nested_comp = projection.sender.owner.composition + try: + nesting_depth = next((k for k, v in comp_hierarchy.items() if v == nested_comp)) + sender_visible = nesting_depth <= num_nesting_levels + except StopIteration: + sender_visible = False + else: + sender_visible = True + if sender_visible: + sndr_proj_label += ':' + controller._get_port_name(projection.sender) + ctlr_input_proj_label = ctlr_label + ':' + controller._get_port_name(outcome_input_port) + else: + sndr_proj_label = self._get_graph_node_label(composition, + projection.sender.owner, + show_types, + show_dimensions) + ctlr_input_proj_label = self._get_graph_node_label(composition, + controller, + show_types, + show_dimensions) + if show_projection_labels: + edge_label = projection.name + else: + edge_label = '' + g.edge(sndr_proj_label, ctlr_input_proj_label, label=edge_label, + color=proj_color, penwidth=proj_width) + # If controller has an agent_rep, assign its node and edges (not Projections per se) if hasattr(controller, 'agent_rep') and controller.agent_rep and show_controller==AGENT_REP : # get agent_rep @@ -1763,7 +1898,8 @@ def find_rcvr_comp(r, c, l): # get any other incoming edges to controller (i.e., other than from ObjectiveMechanism) senders = set() - for i in controller.input_ports[1:]: + # FIX: 11/3/21 - NEED TO MODIFY ONCE OUTCOME InputPorts ARE MOVED + for i in controller.input_ports[controller.num_outcome_input_ports:]: for p in i.path_afferents: senders.add(p.sender.owner) self._assign_incoming_edges(g, @@ -1778,6 +1914,7 @@ def find_rcvr_comp(r, c, l): show_dimensions, show_node_structure, show_projection_labels, + 
show_projections_not_in_composition, proj_color=ctl_proj_color, comp_hierarchy=comp_hierarchy, nesting_level=nesting_level) @@ -1796,7 +1933,8 @@ def _assign_learning_components(self, show_dimensions, show_node_structure, node_struct_args, - show_projection_labels): + show_projection_labels, + show_projections_not_in_composition): """Assign learning nodes and edges to graph""" from psyneulink.core.compositions.composition import NodeRole @@ -1870,6 +2008,7 @@ def _assign_learning_components(self, show_dimensions, show_node_structure, show_projection_labels, + show_projections_not_in_composition, enclosing_comp=enclosing_comp, comp_hierarchy=comp_hierarchy, nesting_level=nesting_level) @@ -1881,6 +2020,7 @@ def _render_projection_as_node(self, show_types, show_dimensions, show_projection_labels, + show_projections_not_in_composition, proj, label, proj_color, @@ -1957,6 +2097,7 @@ def _assign_incoming_edges(self, show_dimensions, show_node_structure, show_projection_labels, + show_projections_not_in_composition, proj_color=None, proj_arrow=None, enclosing_comp=None, @@ -1979,14 +2120,16 @@ def _assign_incoming_edges(self, if show_nested is NESTED: # Add output_CIMs for nested Comps to find sender nodes cims = set([proj.sender.owner for proj in rcvr.afferents - if (isinstance(proj.sender.owner, CompositionInterfaceMechanism) + if (proj in composition.projections + and isinstance(proj.sender.owner, CompositionInterfaceMechanism) and (proj.sender.owner is proj.sender.owner.composition.output_CIM))]) senders.update(cims) # Get sender Node from outer Composition (enclosing_g) if enclosing_g and show_nested is not INSET: # Add input_CIM for current Composition to find senders from enclosing_g cims = set([proj.sender.owner for proj in rcvr.afferents - if (isinstance(proj.sender.owner, CompositionInterfaceMechanism) + if (proj in composition.projections + and isinstance(proj.sender.owner, CompositionInterfaceMechanism) and proj.sender.owner in {composition.input_CIM, composition.parameter_CIM})]) senders.update(cims) # HACK: FIX 6/13/20 - ADD USER-SPECIFIED TARGET NODE FOR INNER COMOSITION (NOT IN processing_graph) @@ -2092,6 +2235,7 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], show_types, show_dimensions, show_projection_labels, + show_projections_not_in_composition, proj, label=proc_mech_label, rcvr_label=proc_mech_rcvr_label, @@ -2135,10 +2279,11 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], proj_color = proj_color_default proj_arrowhead = proj_arrow_default - - # Skip Projections not in the Composition if proj not in composition.projections: - continue + if not show_projections_not_in_composition: + continue + else: + proj_color=self.inactive_projection_color assign_proj_to_enclosing_comp = False @@ -2157,29 +2302,6 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], # and therefore is not passing an afferent Projection from that Composition if not sender.afferents and rcvr is not composition.controller: continue - # # MODIFIED 4/5/21 OLD: - # # Get node(s) from enclosing Comopsition that is/are source(s) of sender(s) - # sndr_spec = self._trace_senders_for_original_sender_mechanism(proj, nesting_level) - # if not sndr_spec: - # continue - # sndr, sndr_port, sndr_nesting_level = sndr_spec - # # if original sender is more than one level above receiver, replace enclosing_g with - # # the g of the original sender composition - # enclosing_comp = comp_hierarchy[sndr_nesting_level] - # enclosing_g = enclosing_comp._show_graph.G - # # Skip: - # # - cims as 
sources (handled in _assign_cim_componoents) - # # - controller (handled in _assign_controller_components) - # if (isinstance(sndr, CompositionInterfaceMechanism) and - # rcvr is not enclosing_comp.controller - # and rcvr is not composition.controller - # or self._is_composition_controller(sndr, enclosing_comp)): - # continue - # if sender is composition.parameter_CIM: - # proj_color = self.control_color - # proj_arrowhead = self.control_projection_arrow - # assign_proj_to_enclosing_comp = True - # MODIFIED 4/5/21 NEW: # FIX: LOOP HERE OVER sndr_spec IF THERE ARE SEVERAL # Get node(s) from enclosing Comopsition that is/are source(s) of sender(s) sndrs_specs = self._trace_senders_for_original_sender_mechanism(proj, nesting_level) @@ -2219,7 +2341,7 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], # Get Node from nested Composition that projects to rcvr sndr = [k.owner for k,v in sender.port_map.items() if v[1] is proj.sender][0] # Skip: - # - cims as sources (handled in _assign_cim_compmoents) + # - cims as sources (handled in _assign_cim_components) # - controller (handled in _assign_controller_components) # NOTE 7/20/20: if receiver is a controller, then we need to skip this block or shadow inputs # will not be rendered -DS diff --git a/psyneulink/core/globals/context.py b/psyneulink/core/globals/context.py index b916661f9dc..a9a52284d63 100644 --- a/psyneulink/core/globals/context.py +++ b/psyneulink/core/globals/context.py @@ -93,7 +93,7 @@ import time as py_time # "time" is declared below import typecheck as tc -from psyneulink.core.globals.keywords import CONTEXT, CONTROL, EXECUTING, EXECUTION_PHASE, FLAGS, INITIALIZATION_STATUS, INITIALIZING, LEARNING, SEPARATOR_BAR, SOURCE, VALIDATE +from psyneulink.core.globals.keywords import CONTEXT, CONTROL, EXECUTING, EXECUTION_PHASE, FLAGS, INITIALIZING, LEARNING, SEPARATOR_BAR, SOURCE, VALIDATE from psyneulink.core.globals.utilities import get_deepcopy_with_shared @@ -235,6 +235,7 @@ def _get_context_string(cls, condition_flags, string += ", ".join(flagged_items) return string + INITIALIZATION_STATUS_FLAGS = {ContextFlags.DEFERRED_INIT, ContextFlags.INITIALIZING, ContextFlags.VALIDATING, @@ -390,7 +391,9 @@ def composition(self, composition): # if isinstance(composition, Composition): if ( composition is None - or composition.__class__.__name__ in {'Composition', 'AutodiffComposition'} + or composition.__class__.__name__ in {'Composition', + 'AutodiffComposition', + 'ParameterEstimationComposition'} ): self._composition = composition else: @@ -570,28 +573,28 @@ def _get_time(component, context): """ from psyneulink.core.globals.context import time - from psyneulink.core.components.shellclasses import Mechanism, Projection, Port + from psyneulink.core.components.shellclasses import Mechanism, Projection, Port, Function no_time = time(None, None, None, None) # Get mechanism to which Component being logged belongs if isinstance(component, Mechanism): ref_mech = component - elif isinstance(component, Port): + elif isinstance(component, (Port, Function)): if isinstance(component.owner, Mechanism): ref_mech = component.owner elif isinstance(component.owner, Projection): ref_mech = component.owner.receiver.owner else: - raise ContextError("Logging currently does not support {} (only {}s, {}s, and {}s).". + raise ContextError("Logging currently does not support {} (only {}s, {}s, {}s, and {}s).". 
format(component.__class__.__name__, - Mechanism.__name__, Port.__name__, Projection.__name__)) + Mechanism.__name__, Port.__name__, Projection.__name__, Function.__name__)) elif isinstance(component, Projection): ref_mech = component.receiver.owner else: - raise ContextError("Logging currently does not support {} (only {}s, {}s, and {}s).". + raise ContextError("Logging currently does not support {} (only {}s, {}s, {}s, and {}s).". format(component.__class__.__name__, - Mechanism.__name__, Port.__name__, Projection.__name__)) + Mechanism.__name__, Port.__name__, Projection.__name__, Function.__name__)) # Get Composition in which it is being (or was last) executed (if any): diff --git a/psyneulink/core/globals/defaults.py b/psyneulink/core/globals/defaults.py index cc83227100a..34c5e4b2c86 100644 --- a/psyneulink/core/globals/defaults.py +++ b/psyneulink/core/globals/defaults.py @@ -37,6 +37,8 @@ class DefaultControlAllocationMode(Enum): GUMBY_MODE = 0.0 BADGER_MODE = 1.0 TEST_MODE = 240 + + defaultControlAllocation = DefaultControlAllocationMode.BADGER_MODE.value #: This is a string # Default gating policy mode values: @@ -45,4 +47,6 @@ class DefaultGatingAllocationMode(Enum): TONIC_MODE = 0.5 SLEEP_MODE = 0.0 TEST_MODE = 240 + + defaultGatingAllocation = DefaultGatingAllocationMode.TONIC_MODE.value diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index b7d7c83c53d..d27a777ebc0 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -26,22 +26,23 @@ __all__ = [ 'ACCUMULATOR_INTEGRATOR', 'ACCUMULATOR_INTEGRATOR_FUNCTION', 'ADAPTIVE', 'ADAPTIVE_INTEGRATOR_FUNCTION', 'ADAPTIVE_MECHANISM', 'ADD_INPUT_PORT', 'ADD_OUTPUT_PORT', - 'ADDITIVE', 'ADDITIVE_PARAM', 'AFTER', 'ALL', 'ALLOCATION_SAMPLES', 'ANGLE', 'ANGLE_FUNCTION', 'ANY', - 'ARGUMENT_THERAPY_FUNCTION', 'ARRANGEMENT', 'ASSERT', 'ASSIGN', 'ASSIGN_VALUE', 'AUTO','AUTO_ASSIGN_MATRIX', - 'AUTO_ASSOCIATIVE_PROJECTION', 'HAS_INITIALIZERS', 'AUTOASSOCIATIVE_LEARNING_MECHANISM', 'LEARNING_MECHANISMS', + 'ADDITIVE', 'ADDITIVE_PARAM', 'AFTER', 'ALL', 'ALLOCATION_SAMPLES', 'ALLOW_PROBES', 'ANGLE', 'ANGLE_FUNCTION', + 'ANY', 'ARGUMENT_THERAPY_FUNCTION', 'ARRANGEMENT', 'ASSERT', 'ASSIGN', 'ASSIGN_VALUE', 'AUTO','AUTO_ASSIGN_MATRIX', + 'AUTO_ASSOCIATIVE_PROJECTION', 'HAS_INITIALIZERS', 'AUTOASSOCIATIVE_LEARNING_MECHANISM', 'AUTODIFF_COMPOSITION', 'BACKPROPAGATION_FUNCTION', 'BEFORE', 'BETA', 'BIAS', 'BOLD', 'BOTH', 'BOUNDS', 'BUFFER_FUNCTION', 'CHANGED', 'CLAMP_INPUT', 'COMBINATION_FUNCTION_TYPE', 'COMBINE', 'COMBINE_MEANS_FUNCTION', 'COMBINE_OUTCOME_AND_COST_FUNCTION', 'COMMAND_LINE', 'comparison_operators', 'COMPARATOR_MECHANISM', 'COMPONENT', - 'COMPONENT_INIT', 'COMPONENT_PREFERENCE_SET', 'COMPOSITION', 'COMPOSITION_INTERFACE_MECHANISM', - 'CONCATENATE_FUNCTION', 'CONDITION', 'CONDITIONS', 'CONSTANT', 'ContentAddressableMemory_FUNCTION', - 'CONTEXT', 'CONTROL', 'CONTROL_MECHANISM', 'CONTROL_PATHWAY', 'CONTROL_PROJECTION', 'CONTROL_PROJECTION_PARAMS', + 'COMPONENT_INIT', 'COMPONENT_PREFERENCE_SET', 'COMPOSITION', 'COMPOSITION_FUNCTION_APPROXIMATOR', + 'COMPOSITION_INTERFACE_MECHANISM', 'CONCATENATE', 'CONCATENATE_FUNCTION', 'CONDITION', 'CONDITIONS', 'CONSTANT', + 'ContentAddressableMemory_FUNCTION', 'CONTEXT', + 'CONTROL', 'CONTROL_MECHANISM', 'CONTROL_PATHWAY', 'CONTROL_PROJECTION', 'CONTROL_PROJECTION_PARAMS', 'CONTROL_PROJECTIONS', 'CONTROL_SIGNAL', 'CONTROL_SIGNAL_SPECS', 'CONTROL_SIGNALS', 'CONTROLLED_PARAMS', 'CONTROLLER', 'CONTROLLER_OBJECTIVE', 'CORRELATION', 
'COSINE', 'COST_FUNCTION', 'COUNT', 'CROSS_ENTROPY', 'CURRENT_EXECUTION_TIME', 'CUSTOM_FUNCTION', 'CYCLE', 'DDM_MECHANISM', 'DECAY', 'DEFAULT', 'DEFAULT_CONTROL_MECHANISM', 'DEFAULT_MATRIX', 'DEFAULT_PREFERENCE_SET_OWNER', 'DEFAULT_PROCESSING_MECHANISM', 'DEFAULT_VARIABLE', 'DEFERRED_ASSIGNMENT', 'DEFERRED_DEFAULT_NAME', 'DEFERRED_INITIALIZATION', 'DictionaryMemory_FUNCTION', - 'DIFFERENCE', 'DIFFERENCE', 'DIFFUSION', 'DISABLE', 'DISABLE_PARAM', 'DIST_FUNCTION_TYPE', 'DIST_MEAN', + 'DIFFERENCE', 'DIFFERENCE', 'DIFFUSION', 'DIRECT', 'DISABLE', 'DISABLE_PARAM', 'DIST_FUNCTION_TYPE', 'DIST_MEAN', 'DIST_SHAPE', 'DISTANCE_FUNCTION', 'DISTANCE_METRICS', 'DISTRIBUTION_FUNCTION_TYPE', 'DIVISION', 'DRIFT_DIFFUSION_INTEGRATOR_FUNCTION', 'DRIFT_ON_A_SPHERE_INTEGRATOR_FUNCTION', 'DUAL_ADAPTIVE_INTEGRATOR_FUNCTION', 'EID_SIMULATION', 'EID_FROZEN', 'EITHER', 'ENABLE_CONTROLLER', 'ENABLED', 'ENERGY', 'ENTROPY', @@ -63,10 +64,10 @@ 'INTEGRATOR_FUNCTION','INTEGRATOR_FUNCTION', 'INTEGRATOR_FUNCTION_TYPE', 'INTEGRATOR_MECHANISM', 'LAST_INTEGRATED_VALUE', 'INTERCEPT', 'INTERNAL', 'INTERNAL_ONLY', 'K_VALUE', 'KOHONEN_FUNCTION', 'KOHONEN_MECHANISM', 'KOHONEN_LEARNING_MECHANISM', 'KWTA_MECHANISM', - 'LABELS', 'LCA_MECHANISM', 'LEAKY_COMPETING_INTEGRATOR_FUNCTION', 'LEAK', 'LEARNED_PARAM', 'LEARNED_PROJECTIONS', - 'LEARNING', 'LEARNING_FUNCTION', 'LEARNING_FUNCTION_TYPE', 'LEARNING_OBJECTIVE', 'LEARNING_MECHANISM', - 'LEARNING_PATHWAY', 'LEARNING_PROJECTION', 'LEARNING_PROJECTION_PARAMS', 'LEARNING_RATE', 'LEARNING_SIGNAL', - 'LEARNING_SIGNAL_SPECS', 'LEARNING_SIGNALS', + 'LABELS', 'LCA_MECHANISM', 'LEAKY_COMPETING_INTEGRATOR_FUNCTION', 'LEAK', + 'LEARNED_PARAM', 'LEARNED_PROJECTIONS', 'LEARNING', 'LEARNING_FUNCTION', 'LEARNING_FUNCTION_TYPE', + 'LEARNING_OBJECTIVE', 'LEARNING_MECHANISM', 'LEARNING_MECHANISMS', 'LEARNING_PATHWAY', 'LEARNING_PROJECTION', + 'LEARNING_PROJECTION_PARAMS', 'LEARNING_RATE', 'LEARNING_SIGNAL', 'LEARNING_SIGNAL_SPECS', 'LEARNING_SIGNALS', 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'LINEAR', 'LINEAR_COMBINATION_FUNCTION', 'LINEAR_FUNCTION', 'LINEAR_MATRIX_FUNCTION', 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'LOW', 'LVOC_CONTROL_MECHANISM', 'L0', 'L1', 'MAPPING_PROJECTION', 'MAPPING_PROJECTION_PARAMS', 'MASKED_MAPPING_PROJECTION', @@ -75,14 +76,14 @@ 'MAX_EXECUTIONS_BEFORE_FINISHED', 'MAX_INDICATOR', 'MAX_VAL', 'MAYBE', 'MEAN', 'MECHANISM', 'MECHANISM_COMPONENT_CATEGORY', 'MECHANISM_DEFAULT', 'MECHANISM_DEFAULTInputValue', 'MECHANISM_DEFAULTParams', 'MECHANISM_EXECUTED_LOG_ENTRY', 'MECHANISM_NAME', 'MECHANISM_PARAM_VALUE', - 'MECHANISM_TYPE', 'MECHANISM_VALUE', 'MEDIAN', 'METRIC', 'MIN_VAL', 'MIN_ABS_VAL', 'MIN_ABS_INDICATOR', 'MODE', - 'MODULATES','MODULATION', 'MODULATORY_PROJECTION', 'MODULATORY_SIGNAL', 'MODULATORY_SIGNALS', + 'MECHANISM_TYPE', 'MECHANISM_VALUE', 'MEDIAN', 'METRIC', 'MIN_VAL', 'MIN_ABS_VAL', 'MIN_ABS_INDICATOR', + 'MODE', 'MODULATES','MODULATION', 'MODULATORY_PROJECTION', 'MODULATORY_SIGNAL', 'MODULATORY_SIGNALS', 'MONITOR', 'MONITOR_FOR_CONTROL', 'MONITOR_FOR_LEARNING', 'MONITOR_FOR_MODULATION', - 'MODEL_SPEC_ID_GENERIC', 'MODEL_SPEC_ID_INPUT_PORTS', 'MODEL_SPEC_ID_OUTPUT_PORTS', 'MODEL_SPEC_ID_PSYNEULINK', - 'MODEL_SPEC_ID_SENDER_MECH', 'MODEL_SPEC_ID_SENDER_PORT', 'MODEL_SPEC_ID_RECEIVER_MECH', - 'MODEL_SPEC_ID_RECEIVER_PORT', - 'MODEL_SPEC_ID_PARAMETER_SOURCE', 'MODEL_SPEC_ID_PARAMETER_VALUE', 'MODEL_SPEC_ID_TYPE', 'MSE', - 'MULTIPLICATIVE', 'MULTIPLICATIVE_PARAM', 'MUTUAL_ENTROPY', + 'MODEL_SPEC_ID_GENERIC', 'MODEL_SPEC_ID_INPUT_PORTS', 'MODEL_SPEC_ID_OUTPUT_PORTS', + 
'MODEL_SPEC_ID_PSYNEULINK', 'MODEL_SPEC_ID_SENDER_MECH', 'MODEL_SPEC_ID_SENDER_PORT', + 'MODEL_SPEC_ID_RECEIVER_MECH', 'MODEL_SPEC_ID_RECEIVER_PORT','MODEL_SPEC_ID_PARAMETER_SOURCE', + 'MODEL_SPEC_ID_PARAMETER_VALUE', 'MODEL_SPEC_ID_TYPE', + 'MSE', 'MULTIPLICATIVE', 'MULTIPLICATIVE_PARAM', 'MUTUAL_ENTROPY', 'NAME', 'NESTED', 'NEWEST', 'NODE', 'NOISE', 'NORMAL_DIST_FUNCTION', 'NORMED_L0_SIMILARITY', 'NOT_EQUAL', 'NUM_EXECUTIONS_BEFORE_FINISHED', 'OBJECTIVE_FUNCTION_TYPE', 'OBJECTIVE_MECHANISM', 'OBJECTIVE_MECHANISM_OBJECT', 'OFF', 'OFFSET', 'OLDEST', 'ON', @@ -105,10 +106,10 @@ 'RANDOM', 'RANDOM_CONNECTIVITY_MATRIX', 'RATE', 'RATIO', 'REARRANGE_FUNCTION', 'RECEIVER', 'RECEIVER_ARG', 'RECURRENT_TRANSFER_MECHANISM', 'REDUCE_FUNCTION', 'REFERENCE_VALUE', 'RESET', 'RESET_STATEFUL_FUNCTION_WHEN', 'RELU_FUNCTION', 'REST', 'RESULT', 'RESULT', 'ROLES', 'RL_FUNCTION', 'RUN', - 'SAMPLE', 'SAVE_ALL_VALUES_AND_POLICIES', 'SCALAR', 'SCALE', 'SCHEDULER', 'SELF', 'SENDER', 'SEPARATOR_BAR', - 'SHADOW_INPUT_NAME', 'SHADOW_INPUTS', 'SIMPLE', 'SIMPLE_INTEGRATOR_FUNCTION', 'SIMULATIONS', 'SINGLETON', 'SIZE', - 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'SSE', 'STABILITY_FUNCTION', 'STANDARD_ARGS', - 'STANDARD_DEVIATION', 'STANDARD_OUTPUT_PORTS', 'SUBTRACTION', 'SUM', + 'SAMPLE', 'SAVE_ALL_VALUES_AND_POLICIES', 'SCALAR', 'SCALE', 'SCHEDULER', 'SELF', 'SENDER', 'SEPARATE', + 'SEPARATOR_BAR', 'SHADOW_INPUT_NAME', 'SHADOW_INPUTS', 'SIMPLE', 'SIMPLE_INTEGRATOR_FUNCTION', 'SIMULATIONS', + 'SINGLETON', 'SIZE', 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'SSE', 'STABILITY_FUNCTION', + 'STANDARD_ARGS', 'STANDARD_DEVIATION', 'STANDARD_OUTPUT_PORTS', 'SUBTRACTION', 'SUM', 'TARGET', 'TARGET_MECHANISM', 'TARGET_LABELS_DICT', 'TERMINAL', 'TERMINATION_MEASURE', 'TERMINATION_THRESHOLD', 'TERMINATION_COMPARISION_OP', 'TERSE', 'THRESHOLD', 'TIME', 'TIME_STEP_SIZE', 'TIME_STEPS_DIM', 'TRAINING_SET', 'TRANSFER_FUNCTION_TYPE', 'TRANSFER_MECHANISM', 'TRANSFER_WITH_COSTS_FUNCTION', 'TRIAL', 'TRIALS_DIM', @@ -335,9 +336,9 @@ def _is_metric(metric): AFTER = 'after' OLDEST = 'oldest' NEWEST = 'newest' - FULL = 'full' TERSE = 'terse' +DIRECT = 'direct' LESS_THAN = '<' LESS_THAN_OR_EQUAL = '<=' @@ -389,7 +390,12 @@ def _is_metric(metric): #region ---------------------------------------------- COMPOSITION ------------------------------------------------- +# 11/15/21: FIX - CHANGE TO LOWER CASE FOR USE WITH componentCategory (OR CHANGE THAT?); MAY NEED TO CHANGE TESTS +# Composition Categories COMPOSITION = 'COMPOSITION' +AUTODIFF_COMPOSITION = 'AutodiffComposition' +COMPOSITION_FUNCTION_APPROXIMATOR = 'CompositionFunctionApproximator' + INPUT_CIM_NAME = 'INPUT_CIM' OUTPUT_CIM_NAME = 'OUTPUT_CIM' PARAMETER_CIM_NAME = 'PARAMETER_CIM' @@ -631,6 +637,7 @@ def _is_metric(metric): TRIAL = 'trial' ROLES = 'roles' +ALLOW_PROBES = 'allow_probes' CONDITIONS = 'conditions' VALUES = 'values' FUNCTIONS = 'functions' @@ -771,7 +778,6 @@ def _is_metric(metric): EVC_SIMULATION = 'CONTROL SIMULATION' ALLOCATION_SAMPLES = "allocation_samples" - # GatingMechanism GATING_SIGNALS = 'gating_signals' GATING_SIGNAL_SPECS = 'GATING_SIGNAL_SPECS' @@ -902,6 +908,8 @@ def _is_metric(metric): SINUSOID = 'sinusoid' COMBINE = 'combine' +CONCATENATE = 'concatenate' +SEPARATE = 'separate' SUM = 'sum' DIFFERENCE = DIFFERENCE # Defined above for DISTANCE_METRICS PRODUCT = 'product' diff --git a/psyneulink/core/globals/log.py b/psyneulink/core/globals/log.py index 72872bf4c24..c6580e6c610 100644 --- a/psyneulink/core/globals/log.py +++ 
b/psyneulink/core/globals/log.py @@ -470,6 +470,7 @@ def from_string(s): except KeyError: raise LogError("\'{}\' is not a value of {}".format(s, LogCondition)) + TIME_NOT_SPECIFIED = 'Time Not Specified' EXECUTION_CONDITION_NAMES = {LogCondition.PROCESSING.name, LogCondition.LEARNING.name, @@ -482,6 +483,8 @@ class LogTimeScaleIndices(AutoNumber): TRIAL = () PASS = () TIME_STEP = () + + NUM_TIME_SCALES = len(LogTimeScaleIndices.__members__) TIME_SCALE_NAMES = list(LogTimeScaleIndices.__members__) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 63a82d3f668..feabfa83a85 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -296,7 +296,6 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co import typing import weakref -import numpy as np from psyneulink.core.rpc.graph_pb2 import Entry, ndArray from psyneulink.core.globals.context import Context, ContextError, ContextFlags, _get_time, handle_external_context @@ -366,7 +365,7 @@ def copy_parameter_value(value, shared_types=None, memo=None): from psyneulink.core.components.component import Component, ComponentsMeta if shared_types is None: - shared_types = (Component, ComponentsMeta, types.MethodType) + shared_types = (Component, ComponentsMeta, types.MethodType, types.ModuleType) else: shared_types = tuple(shared_types) @@ -784,6 +783,12 @@ class Parameter(ParameterBase): :default: None + port + stores a reference to the ParameterPort that modulates this + Parameter, if applicable + + :default: None + """ # The values of these attributes will never be inherited from parent Parameters # KDM 7/12/18: consider inheriting ONLY default_value? @@ -847,6 +852,7 @@ def __init__( reference=False, dependencies=None, initializer=None, + port=None, _owner=None, _inherited=False, # this stores a reference to the Parameter object that is the @@ -855,7 +861,6 @@ def __init__( _inherited_source=None, _user_specified=False, # if modulated, set to the ParameterPort - _port=None, **kwargs ): if isinstance(aliases, str): @@ -910,10 +915,10 @@ def __init__( reference=reference, dependencies=dependencies, initializer=initializer, + port=port, _inherited=_inherited, _inherited_source=_inherited_source, _user_specified=_user_specified, - _port=_port, **kwargs ) @@ -1042,10 +1047,7 @@ def _inherited(self, value): self._is_invalid_source = value if value: - for attr in self._param_attrs: - if attr not in self._uninherited_attrs: - self._inherited_attrs_cache[attr] = getattr(self, attr) - delattr(self, attr) + self._cache_inherited_attrs() else: # This is a rare operation, so we can just immediately # trickle down sources without performance issues. 
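As an aside on the hunk above and the one that follows: the inline loop in the _inherited setter is factored out into _cache_inherited_attrs / _restore_inherited_attrs so that subclasses can pass extra exclusions. A minimal, self-contained sketch of that cache/delete/restore pattern, using hypothetical names rather than PsyNeuLink's actual Parameter API:

class Base:
    color = 'black'

class Node(Base):
    _uninherited = {'name'}

    def __init__(self, name, color):
        self.name = name
        self.color = color
        self._cache = {}

    def _cache_inherited_attrs(self, exclusions=None):
        exclusions = self._uninherited if exclusions is None else exclusions
        for attr in ('name', 'color'):
            if attr not in exclusions:
                # remember the instance value, then drop it so lookups
                # fall through to the parent (class) definition
                self._cache[attr] = getattr(self, attr)
                delattr(self, attr)

    def _restore_inherited_attrs(self, exclusions=None):
        exclusions = self._uninherited if exclusions is None else exclusions
        for attr in ('name', 'color'):
            if attr not in exclusions and getattr(self, attr) is getattr(Base, attr, None):
                setattr(self, attr, self._cache[attr])

n = Node('n1', 'red')
n._cache_inherited_attrs()
assert n.color == 'black'    # value now comes from the parent class
n._restore_inherited_attrs()
assert n.color == 'red'      # cached instance value is back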
@@ -1066,22 +1068,32 @@ def _inherited(self, value): next_child._inherit_from(self) children.extend(next_child._owner._children) - for attr in self._param_attrs: - if ( - attr not in self._uninherited_attrs - and getattr(self, attr) is getattr(self._parent, attr) - ): - setattr(self, attr, self._inherited_attrs_cache[attr]) + self._restore_inherited_attrs() self.__inherited = value def _inherit_from(self, parent): self._inherited_source = weakref.ref(parent) - def _cache_inherited_attrs(self): + def _cache_inherited_attrs(self, exclusions=None): + if exclusions is None: + exclusions = self._uninherited_attrs + for attr in self._param_attrs: - if attr not in self._uninherited_attrs: + if attr not in exclusions: self._inherited_attrs_cache[attr] = getattr(self, attr) + delattr(self, attr) + + def _restore_inherited_attrs(self, exclusions=None): + if exclusions is None: + exclusions = self._uninherited_attrs + + for attr in self._param_attrs: + if ( + attr not in exclusions + and getattr(self, attr) is getattr(self._parent, attr) + ): + setattr(self, attr, self._inherited_attrs_cache[attr]) @property def _parent(self): @@ -1521,7 +1533,7 @@ def clear_history( pass def _initialize_from_context(self, context=None, base_context=Context(execution_id=None), override=True): - from psyneulink.core.components.component import Component + from psyneulink.core.components.component import Component, ComponentsMeta try: try: @@ -1540,7 +1552,7 @@ def _initialize_from_context(self, context=None, base_context=Context(execution_ except KeyError: new_history = NotImplemented - shared_types = (Component, types.MethodType) + shared_types = (Component, ComponentsMeta, types.MethodType, types.ModuleType) if isinstance(new_val, (dict, list)): new_val = copy_iterable_with_shared(new_val, shared_types) @@ -1758,12 +1770,42 @@ def __getattr__(self, attr): except AttributeError: return super().__getattr__(attr) + def __setattr__(self, attr, value): + if self._source_exists and attr in self._sourced_attrs: + setattr(self.source, attr, value) + else: + super().__setattr__(attr, value) + + def _cache_inherited_attrs(self): + super()._cache_inherited_attrs( + exclusions=self._uninherited_attrs.union(self._sourced_attrs) + ) + + def _restore_inherited_attrs(self): + super()._restore_inherited_attrs( + exclusions=self._uninherited_attrs.union(self._sourced_attrs) + ) + def _set_name(self, name): if self.shared_parameter_name is None: self.shared_parameter_name = name super(Parameter, self).__setattr__('name', name) + @handle_external_context() + def get_previous( + self, + context=None, + index: int = 1, + range_start: int = None, + range_end: int = None, + ): + return self.source.get_previous(context, index, range_start, range_end) + + @handle_external_context() + def get_delta(self, context=None): + return self.source.get_delta(context) + @property def source(self): try: @@ -1776,11 +1818,19 @@ def source(self): f' cannot be stateful.' ) obj = obj.values[None] - except (AttributeError, KeyError): + except AttributeError: try: obj = getattr(self._owner._owner, self.attribute_name) except AttributeError: return None + except KeyError: + # KeyError means there is no stored value for this + # parameter, which should only occur when the source is + # desired for a descriptive parameter attribute value (e.g. 
+ # stateful or loggable) and when either self._owner._owner + # is a type or is in the process of instantiating a + # Parameter for an instance of a Component + obj = getattr(self._owner._owner.defaults, self.attribute_name) try: obj = getattr(obj.parameters, self.shared_parameter_name) @@ -1791,7 +1841,7 @@ def source(self): delattr(self, p) except AttributeError: pass - self._source_exists = True + self._source_exists = True return obj except AttributeError: return None @@ -1804,6 +1854,10 @@ def final_source(self): return base_param + @property + def _sourced_attrs(self): + return set([a for a in self._param_attrs if a not in self._unsourced_attrs]) + class FunctionParameter(SharedParameter): """ diff --git a/psyneulink/core/globals/preferences/preferenceset.py b/psyneulink/core/globals/preferences/preferenceset.py index 1981490abae..e63272496ac 100644 --- a/psyneulink/core/globals/preferences/preferenceset.py +++ b/psyneulink/core/globals/preferences/preferenceset.py @@ -1016,20 +1016,20 @@ def show(self, type=None): def _assign_prefs(object, prefs, prefs_class:PreferenceSet): - if isinstance(prefs, PreferenceSet): - object.prefs = prefs - # FIX: CHECK LEVEL HERE?? OR DOES IT NOT MATTER, AS OWNER WILL BE ASSIGNED DYNAMICALLY?? - # Otherwise, if prefs is a specification dict instantiate it, or if it is None assign defaults - else: - object.prefs = prefs_class(owner=object, prefs=prefs) - try: - # assign log conditions from preferences - object.parameters.value.log_condition = object.prefs._log_pref.setting - except AttributeError: - pass - - try: - # assign delivery conditions from preferences - object.parameters.value.delivery_condition = object.prefs._delivery_pref.setting - except AttributeError: - pass + if isinstance(prefs, PreferenceSet): + object.prefs = prefs + # FIX: CHECK LEVEL HERE?? OR DOES IT NOT MATTER, AS OWNER WILL BE ASSIGNED DYNAMICALLY?? 
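To make the SharedParameter changes above easier to follow: reads of missing attributes already fall through to the source parameter via __getattr__, and the new __setattr__ forwards writes of sourced attributes the same way. A minimal sketch of that delegation pattern with hypothetical names (not PsyNeuLink's actual classes):

class Source:
    default_value = 0

class Shared:
    _sourced_attrs = {'default_value'}

    def __init__(self, source):
        # bypass __setattr__ while wiring up the delegate
        object.__setattr__(self, 'source', source)

    def __getattr__(self, attr):
        # only called when normal lookup fails; delegate to the source
        return getattr(self.source, attr)

    def __setattr__(self, attr, value):
        if attr in self._sourced_attrs:
            setattr(self.source, attr, value)
        else:
            object.__setattr__(self, attr, value)

s = Shared(Source())
s.default_value = 7
assert s.source.default_value == 7   # write was forwarded to the source
assert s.default_value == 7          # read falls through to the source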
+ # Otherwise, if prefs is a specification dict instantiate it, or if it is None assign defaults + else: + object.prefs = prefs_class(owner=object, prefs=prefs) + try: + # assign log conditions from preferences + object.parameters.value.log_condition = object.prefs._log_pref.setting + except AttributeError: + pass + + try: + # assign delivery conditions from preferences + object.parameters.value.delivery_condition = object.prefs._delivery_pref.setting + except AttributeError: + pass diff --git a/psyneulink/core/globals/registry.py b/psyneulink/core/globals/registry.py index f29ac1ff7b1..2586761aba4 100644 --- a/psyneulink/core/globals/registry.py +++ b/psyneulink/core/globals/registry.py @@ -16,7 +16,7 @@ from psyneulink.core.globals.keywords import \ CONTROL_PROJECTION, DDM_MECHANISM, GATING_SIGNAL, INPUT_PORT, MAPPING_PROJECTION, OUTPUT_PORT, \ FUNCTION_COMPONENT_CATEGORY, COMPONENT_PREFERENCE_SET, MECHANISM_COMPONENT_CATEGORY, \ - PARAMETER_PORT, PREFERENCE_SET, PROCESS_COMPONENT_CATEGORY, PROJECTION_COMPONENT_CATEGORY, \ + PARAMETER_PORT, PREFERENCE_SET, PROJECTION_COMPONENT_CATEGORY, \ PORT_COMPONENT_CATEGORY __all__ = [ diff --git a/psyneulink/core/globals/sampleiterator.py b/psyneulink/core/globals/sampleiterator.py index 9d12965a231..cfcbf3cad1f 100644 --- a/psyneulink/core/globals/sampleiterator.py +++ b/psyneulink/core/globals/sampleiterator.py @@ -15,14 +15,13 @@ """ -import numpy as np - -import typecheck as tc from collections.abc import Iterator -from inspect import isclass from decimal import Decimal, getcontext +from inspect import isclass from numbers import Number +import numpy as np +import typecheck as tc __all__ = ['SampleSpec', 'SampleIterator'] @@ -91,7 +90,7 @@ class SampleSpec(): complete. * if **num** is not specified, the **function** is called once on each iteration, and iteration may continue - indefintely. + indefinitely. (3) Specify a custom_spec, that is passed to SampleIterator unmodified. diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 1200048873d..351775aed03 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -195,15 +195,15 @@ def is_modulation_operation(val): return get_modulationOperation_name(val) def get_modulationOperation_name(operation): - x = operation(1, 2) - if x == 1: - return MODULATION_OVERRIDE - elif x == 2: - return MODULATION_MULTIPLY - elif x == 3: - return MODULATION_ADD - else: - return False + x = operation(1, 2) + if x == 1: + return MODULATION_OVERRIDE + elif x == 2: + return MODULATION_MULTIPLY + elif x == 3: + return MODULATION_ADD + else: + return False @@ -238,8 +238,8 @@ def __new__(component_type): obj._value_ = value return obj -# ******************************** GLOBAL STRUCTURES, CONSTANTS AND METHODS ******************************************* +# ******************************** GLOBAL STRUCTURES, CONSTANTS AND METHODS ******************************************* TEST_CONDTION = False @@ -1204,7 +1204,7 @@ def __getitem__(self, key): key_num = self._get_key_for_item(key) if key_num is None: # raise TypeError("\'{}\' is not a key in the {} being addressed". - # format(key, self.__class__.__name__)) + # format(key, self.__class__.__name__)) # raise KeyError("\'{}\' is not a key in {}". raise TypeError("\'{}\' is not a key in {}". 
format(key, self.name)) @@ -1540,6 +1540,43 @@ def flatten_list(l): return [item for sublist in l for item in sublist] +# Seeds and randomness + +class SeededRandomState(np.random.RandomState): + def __init__(self, *args, **kwargs): + # Extract seed + self.used_seed = (kwargs.get('seed', None) or args[0])[:] + super().__init__(*args, **kwargs) + + def __deepcopy__(self, memo): + # There's no easy way to deepcopy parent first. + # Create new instance and rewrite the state. + dup = type(self)(seed=self.used_seed) + dup.set_state(self.get_state()) + return dup + + def seed(self, seed): + assert False, "Use 'seed' parameter instead of seeding the random state directly" + + +class _SeededPhilox(np.random.Generator): + def __init__(self, *args, **kwargs): + # Extract seed + self.used_seed = (kwargs.get('seed', None) or args[0])[:] + state = np.random.Philox([self.used_seed]) + super().__init__(state) + + def __deepcopy__(self, memo): + # There's no easy way to deepcopy parent first. + # Create new instance and rewrite the state. + dup = type(self)(seed=self.used_seed) + dup.bit_generator.state = self.bit_generator.state + return dup + + def seed(self, seed): + assert False, "Use 'seed' parameter instead of seeding the random state directly" + + _seed = np.int32((time.time() * 1000) % 2**31) def get_global_seed(offset=1): global _seed diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 3134fabc0fb..e971df3d616 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -17,7 +17,6 @@ from llvmlite import ir -from . import builtins from . import codegen from .builder_context import * from .builder_context import _all_modules, _convert_llvm_ir_to_ctype @@ -47,15 +46,11 @@ class ExecutionMode(enum.Flag): def _compiled_modules() -> Set[ir.Module]: - if ptx_enabled: - return _cpu_engine.compiled_modules | _ptx_engine.compiled_modules - return _cpu_engine.compiled_modules + return set().union(*(e.compiled_modules for e in _get_engines())) def _staged_modules() -> Set[ir.Module]: - if ptx_enabled: - return _cpu_engine.staged_modules | _ptx_engine.staged_modules - return _cpu_engine.staged_modules + return set().union(*(e.staged_modules for e in _get_engines())) def _llvm_build(target_generation=_binary_generation + 1): @@ -68,9 +63,8 @@ def _llvm_build(target_generation=_binary_generation + 1): if "compile" in debug_env: print("STAGING GENERATION: {} -> {}".format(_binary_generation, target_generation)) - _cpu_engine.stage_compilation(_modules) - if ptx_enabled: - _ptx_engine.stage_compilation(_modules) + for e in _get_engines(): + e.stage_compilation(_modules) _modules.clear() # update binary generation @@ -84,6 +78,12 @@ def __init__(self, name: str): self.__c_func = None self.__cuda_kernel = None + # Make sure builder context is initialized + LLVMBuilderContext.get_current() + + # Compile any pending modules + _llvm_build(LLVMBuilderContext._llvm_generation) + # Function signature # We could skip compilation if the function is in _compiled_models, # but that happens rarely @@ -106,6 +106,9 @@ def __init__(self, name: str): @property def c_func(self): if self.__c_func is None: + # This assumes there are potential staged modules. 
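A small usage note on SeededRandomState above: remembering the seed lets __deepcopy__ rebuild an equivalent generator and then transplant the current state, so the copy continues the same stream as the original. An equivalent illustration using plain numpy (no PsyNeuLink imports; the duplicate is built by hand here rather than via copy.deepcopy):

import numpy as np

rs = np.random.RandomState(seed=[42])    # original generator
rs.uniform()                             # advance it a bit
dup = np.random.RandomState(seed=[42])   # rebuild from the remembered seed
dup.set_state(rs.get_state())            # then transplant the current state
assert rs.uniform() == dup.uniform()     # both draw the same next value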
+ # The engine had to be instantiated to have staged modules, + # so it's safe to access it directly _cpu_engine.compile_staged() ptr = _cpu_engine._engine.get_function_address(self.name) self.__c_func = self.__c_func_type(ptr) @@ -138,13 +141,12 @@ def cuda_wrap_call(self, *args, threads=1, block_size=128): @staticmethod @functools.lru_cache(maxsize=32) def from_obj(obj, *, tags:frozenset=frozenset()): - name = LLVMBuilderContext.get_global().gen_llvm_function(obj, tags=tags).name + name = LLVMBuilderContext.get_current().gen_llvm_function(obj, tags=tags).name return LLVMBinaryFunction.get(name) @staticmethod @functools.lru_cache(maxsize=32) def get(name: str): - _llvm_build(LLVMBuilderContext._llvm_generation) return LLVMBinaryFunction(name) def get_multi_run(self): @@ -152,65 +154,40 @@ def get_multi_run(self): multirun_llvm = _find_llvm_function(self.name + "_multirun") except ValueError: function = _find_llvm_function(self.name) - with LLVMBuilderContext.get_global() as ctx: + with LLVMBuilderContext.get_current() as ctx: multirun_llvm = codegen.gen_multirun_wrapper(ctx, function) return LLVMBinaryFunction.get(multirun_llvm.name) -_cpu_engine = cpu_jit_engine() -if ptx_enabled: - _ptx_engine = ptx_jit_engine() - - -# Initialize builtins -def init_builtins(): - start = time.perf_counter() - with LLVMBuilderContext.get_global() as ctx: - # Numeric - builtins.setup_pnl_intrinsics(ctx) - builtins.setup_csch(ctx) - builtins.setup_coth(ctx) - builtins.setup_tanh(ctx) - builtins.setup_is_close(ctx) - - # PRNG - builtins.setup_mersenne_twister(ctx) - - # Matrix/Vector - builtins.setup_vxm(ctx) - builtins.setup_vxm_transposed(ctx) - builtins.setup_vec_add(ctx) - builtins.setup_vec_sum(ctx) - builtins.setup_mat_add(ctx) - builtins.setup_vec_sub(ctx) - builtins.setup_mat_sub(ctx) - builtins.setup_vec_hadamard(ctx) - builtins.setup_mat_hadamard(ctx) - builtins.setup_vec_scalar_mult(ctx) - builtins.setup_mat_scalar_mult(ctx) - builtins.setup_mat_scalar_add(ctx) - - finish = time.perf_counter() - - if "time_stat" in debug_env: - print("Time to setup PNL builtins: {}".format(finish - start)) +_cpu_engine = None +_ptx_engine = None -def cleanup(): - _cpu_engine.clean_module() - _cpu_engine.staged_modules.clear() - _cpu_engine.compiled_modules.clear() +def _get_engines(): + global _cpu_engine + if _cpu_engine is None: + _cpu_engine = cpu_jit_engine() + + global _ptx_engine if ptx_enabled: - _ptx_engine.clean_module() - _ptx_engine.staged_modules.clear() - _ptx_engine.compiled_modules.clear() + if _ptx_engine is None: + _ptx_engine = ptx_jit_engine() + return [_cpu_engine, _ptx_engine] + + return [_cpu_engine] + + + +def cleanup(): + global _cpu_engine + _cpu_engine = None + global _ptx_engine + _ptx_engine = None _modules.clear() _all_modules.clear() LLVMBinaryFunction.get.cache_clear() LLVMBinaryFunction.from_obj.cache_clear() - init_builtins() - -init_builtins() + LLVMBuilderContext.clear_global() diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index fc6d1318553..42a95aa9fc1 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -17,13 +17,17 @@ import numpy as np import os import re +import time from typing import Set import weakref + from psyneulink.core.scheduling.time import Time, TimeScale from psyneulink.core.globals.sampleiterator import SampleIterator from psyneulink.core.globals.utilities import ContentAddressableList from psyneulink.core import llvm as pnlvm + from . import codegen +from . 
import helpers from .debug import debug_env __all__ = ['LLVMBuilderContext', '_modules', '_find_llvm_function'] @@ -39,7 +43,7 @@ def module_count(): if "stat" in debug_env: print("Total LLVM modules: ", len(_all_modules)) print("Total structures generated: ", _struct_count) - s = LLVMBuilderContext.get_global() + s = LLVMBuilderContext.get_current() print("Total generations by global context: {}".format(s._llvm_generation)) print("Object cache in global context: {} hits, {} misses".format(s._stats["cache_requests"] - s._stats["cache_misses"], s._stats["cache_misses"])) for stat in ("input", "output", "param", "state", "data"): @@ -51,7 +55,8 @@ def module_count(): _BUILTIN_PREFIX = "__pnl_builtin_" -_builtin_intrinsics = frozenset(('pow', 'log', 'exp', 'tanh', 'coth', 'csch', 'is_close')) +_builtin_intrinsics = frozenset(('pow', 'log', 'exp', 'tanh', 'coth', 'csch', 'is_close', 'mt_rand_init', + 'philox_rand_init')) class _node_wrapper(): @@ -81,14 +86,15 @@ def wrapper(bctx, obj): class LLVMBuilderContext: - __global_context = None + __current_context = None __uniq_counter = 0 _llvm_generation = 0 int32_ty = ir.IntType(32) - float_ty = ir.DoubleType() + default_float_ty = ir.DoubleType() bool_ty = ir.IntType(1) - def __init__(self): + def __init__(self, float_ty): + assert LLVMBuilderContext.__current_context is None self._modules = [] self._cache = weakref.WeakKeyDictionary() self._stats = { "cache_misses":0, @@ -100,6 +106,9 @@ def __init__(self): "input_structs_generated":0, "output_structs_generated":0, } + self.float_ty = float_ty + self.init_builtins() + LLVMBuilderContext.__current_context = self def __enter__(self): module = ir.Module(name="PsyNeuLinkModule-" + str(LLVMBuilderContext._llvm_generation)) @@ -119,10 +128,14 @@ def module(self): return self._modules[-1] @classmethod - def get_global(cls): - if cls.__global_context is None: - cls.__global_context = LLVMBuilderContext() - return cls.__global_context + def get_current(cls): + if cls.__current_context is None: + return LLVMBuilderContext(cls.default_float_ty) + return cls.__current_context + + @classmethod + def clear_global(cls): + cls.__current_context = None @classmethod def get_unique_name(cls, name: str): @@ -130,6 +143,52 @@ def get_unique_name(cls, name: str): name = re.sub(r"[^a-zA-Z0-9_]", "_", name) return name + '_' + str(cls.__uniq_counter) + def init_builtins(self): + start = time.perf_counter() + with self as ctx: + # Numeric + pnlvm.builtins.setup_pnl_intrinsics(ctx) + pnlvm.builtins.setup_csch(ctx) + pnlvm.builtins.setup_coth(ctx) + pnlvm.builtins.setup_tanh(ctx) + pnlvm.builtins.setup_is_close(ctx) + + # PRNG + pnlvm.builtins.setup_mersenne_twister(ctx) + pnlvm.builtins.setup_philox(ctx) + + # Matrix/Vector + pnlvm.builtins.setup_vxm(ctx) + pnlvm.builtins.setup_vxm_transposed(ctx) + pnlvm.builtins.setup_vec_add(ctx) + pnlvm.builtins.setup_vec_sum(ctx) + pnlvm.builtins.setup_mat_add(ctx) + pnlvm.builtins.setup_vec_sub(ctx) + pnlvm.builtins.setup_mat_sub(ctx) + pnlvm.builtins.setup_vec_hadamard(ctx) + pnlvm.builtins.setup_mat_hadamard(ctx) + pnlvm.builtins.setup_vec_scalar_mult(ctx) + pnlvm.builtins.setup_mat_scalar_mult(ctx) + pnlvm.builtins.setup_mat_scalar_add(ctx) + + finish = time.perf_counter() + + if "time_stat" in debug_env: + print("Time to setup PNL builtins: {}".format(finish - start)) + + def get_uniform_dist_function_by_state(self, state): + if len(state.type.pointee) == 5: + return self.import_llvm_function("__pnl_builtin_mt_rand_double") + if len(state.type.pointee) == 7: + return 
self.import_llvm_function("__pnl_builtin_philox_rand_{}".format(str(self.float_ty))) + + def get_normal_dist_function_by_state(self, state): + if len(state.type.pointee) == 5: + return self.import_llvm_function("__pnl_builtin_mt_rand_normal") + if len(state.type.pointee) == 7: + # Normal exists only for self.float_ty + return self.import_llvm_function("__pnl_builtin_philox_rand_normal") + def get_builtin(self, name: str, args=[], function_type=None): if name in _builtin_intrinsics: return self.import_llvm_function(_BUILTIN_PREFIX + name) @@ -188,6 +247,36 @@ def import_llvm_function(self, fun, *, tags:frozenset=frozenset()) -> ir.Functio return decl_f return f + def get_random_state_ptr(self, builder, component, state, params): + random_state_ptr = helpers.get_state_ptr(builder, component, state, "random_state") + + + # Used seed is the last member of both MT state and Philox state + seed_idx = len(random_state_ptr.type.pointee) - 1 + used_seed_ptr = builder.gep(random_state_ptr, [self.int32_ty(0), self.int32_ty(seed_idx)]) + used_seed = builder.load(used_seed_ptr) + + seed_ptr = helpers.get_param_ptr(builder, component, params, "seed") + if isinstance(seed_ptr.type.pointee, ir.ArrayType): + # Modulated params are usually single element arrays + seed_ptr = builder.gep(seed_ptr, [self.int32_ty(0), self.int32_ty(0)]) + new_seed = builder.load(seed_ptr) + # FIXME: The seed should ideally be integer already. + # However, it can be modulated and we don't support, + # passing integer values as computed results. + new_seed = builder.fptoui(new_seed, used_seed.type) + + seeds_cmp = builder.icmp_unsigned("!=", used_seed, new_seed) + with builder.if_then(seeds_cmp, likely=False): + if seed_idx == 4: + reseed_f = self.get_builtin("mt_rand_init") + elif seed_idx == 6: + reseed_f = self.get_builtin("philox_rand_init") + + builder.call(reseed_f, [random_state_ptr, new_seed]) + + return random_state_ptr + @staticmethod def get_debug_location(func: ir.Function, component): if "debug_info" not in debug_env: @@ -266,6 +355,8 @@ def _param_struct(p): val = np.asfarray(val).flatten() elif p.name == 'num_estimates': # Should always be int val = np.int32(0) if val is None else np.int32(val) + elif p.name == 'num_trials_per_estimate': # Should always be int + val = np.int32(0) if val is None else np.int32(val) elif np.ndim(val) == 0 and component._is_param_modulated(p): val = [val] # modulation adds array wrap return self.convert_python_struct_to_llvm_ir(val) @@ -336,6 +427,9 @@ def convert_python_struct_to_llvm_ir(self, t): return self.convert_python_struct_to_llvm_ir(t.tolist()) elif isinstance(t, np.random.RandomState): return pnlvm.builtins.get_mersenne_twister_state_struct(self) + elif isinstance(t, np.random.Generator): + assert isinstance(t.bit_generator, np.random.Philox) + return pnlvm.builtins.get_philox_state_struct(self) elif isinstance(t, Time): return ir.ArrayType(self.int32_ty, len(TimeScale)) elif isinstance(t, SampleIterator): @@ -449,7 +543,9 @@ def _convert_llvm_ir_to_ctype(t: ir.Type): if type_t is ir.VoidType: return None elif type_t is ir.IntType: - if t.width == 8: + if t.width == 1: + return ctypes.c_bool + elif t.width == 8: return ctypes.c_int8 elif t.width == 16: return ctypes.c_int16 diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index cb3ea246fc7..859a740af0c 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -527,6 +527,8 @@ def _setup_mt_rand_init_scalar(ctx, state_ty): pidx = builder.gep(state, 
[ctx.int32_ty(0), ctx.int32_ty(1)]) builder.store(pidx.type.pointee(_MERSENNE_N), pidx) + seed_p = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)]) + builder.store(seed, seed_p) builder.ret_void() return builder.function @@ -615,6 +617,10 @@ def _setup_mt_rand_init(ctx, state_ty, init_scalar): # set the 0th element to INT_MIN builder.store(a_0.type.pointee(0x80000000), a_0) + + # store used seed + used_seed_p = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)]) + builder.store(seed, used_seed_p) builder.ret_void() return builder.function @@ -842,7 +848,8 @@ def get_mersenne_twister_state_struct(ctx): ir.ArrayType(ctx.int32_ty, _MERSENNE_N), # array ctx.int32_ty, # index ctx.int32_ty, # last_gauss available - ctx.float_ty]) # last_gauss + ctx.float_ty, # last_gauss + ctx.int32_ty]) # used seed def setup_mersenne_twister(ctx): @@ -854,3 +861,1149 @@ def setup_mersenne_twister(ctx): gen_int = _setup_mt_rand_integer(ctx, state_ty) gen_float = _setup_mt_rand_float(ctx, state_ty, gen_int) _setup_mt_rand_normal(ctx, state_ty, gen_float) + + +_PHILOX_DEFAULT_ROUNDS = 10 +_PHILOX_DEFAULT_BUFFER_SIZE = 4 +_PHILOX_INIT_A = 0x43b0d7e5 +_PHILOX_MULT_A = 0x931e8875 +_PHILOX_MIX_MULT_L = 0xca01f9dd +_PHILOX_MIX_MULT_R = 0x4973f715 +_PHILOX_INIT_B = 0x8b51f9dd +_PHILOX_MULT_B = 0x58f38ded + + +def _hash_mix(builder, a, hash_const): + val = builder.xor(a, hash_const) + hash_const = builder.mul(hash_const, hash_const.type(_PHILOX_MULT_A)) + val = builder.mul(val, hash_const) + # XSHIFT sizeof(uint32) * 8 // 2 == 16 + val_sh = builder.lshr(val, val.type(16)) + val = builder.xor(val, val_sh) + return val, hash_const + +def _mix(builder, a, b): + val_a = builder.mul(a, a.type(_PHILOX_MIX_MULT_L)) + val_b = builder.mul(b, b.type(_PHILOX_MIX_MULT_R)) + + val = builder.sub(val_a, val_b) + # XSHIFT sizeof(uint32) * 8 // 2 == 16 + val_sh = builder.lshr(val, val.type(16)) + return builder.xor(val, val_sh) + + +def _setup_philox_rand_init(ctx, state_ty): + seed_ty = ir.IntType(64) + builder = _setup_builtin_func_builder(ctx, "philox_rand_init", (state_ty.as_pointer(), seed_ty)) + state, seed = builder.function.args + + # Most of the state is set to 0 + builder.store(state.type.pointee(None), state) + + # reset buffer position to max + buffer_pos_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)]) + assert buffer_pos_ptr.type.pointee.width == 16 + builder.store(buffer_pos_ptr.type.pointee(_PHILOX_DEFAULT_BUFFER_SIZE), + buffer_pos_ptr) + + # np calls '_seed_seq.generate_state(2, np.int64) to get the key + # the passed seed is used as an entropy array + # for np.SeedSeq, which in turn generates 2x64 bit words to + # use as key. + + # 1.) Generate SeedSeq entropy pool + # 1. a) Generate assembled entropy based on the provided seed + assembled_entropy = ir.ArrayType(ctx.int32_ty, 4)(None) + seed_lo = builder.trunc(seed, ctx.int32_ty) + seed_hi = builder.lshr(seed, seed.type(32)) + seed_hi = builder.trunc(seed_hi, ctx.int32_ty) + + assembled_entropy = builder.insert_value(assembled_entropy, seed_lo, 0) + assembled_entropy = builder.insert_value(assembled_entropy, seed_hi, 1) + + # 1. 
b) Mix assembled entropy to the pool + entropy_pool = ir.ArrayType(ctx.int32_ty, 4)(None) + # any diff would be filled with 0, + # so we might as well force the same size + assert len(entropy_pool.type) == len(assembled_entropy.type) + + # First perturb the entropy with some magic constants + hash_const = ctx.int32_ty(_PHILOX_INIT_A) + for i in range(len(entropy_pool.type)): + ent_val = builder.extract_value(assembled_entropy, i) + new_val, hash_const = _hash_mix(builder, ent_val, hash_const) + + entropy_pool = builder.insert_value(entropy_pool, new_val, i) + + # Next perturb the entropy with itself + for i_src in range(len(entropy_pool.type)): + for i_dst in range(len(entropy_pool.type)): + if i_src != i_dst: + src_val = builder.extract_value(entropy_pool, i_src) + dst_val = builder.extract_value(entropy_pool, i_dst) + + new_val, hash_const = _hash_mix(builder, src_val, hash_const) + new_val = _mix(builder, dst_val, new_val) + entropy_pool = builder.insert_value(entropy_pool, new_val, i_dst) + + # 2.) Use the mixed entropy pool to generate 2xi64 keys + hash_const = ctx.int32_ty(_PHILOX_INIT_B) + key_state = ir.ArrayType(ctx.int32_ty, 4)(None) + for i in range(len(key_state.type)): + pool_val = builder.extract_value(entropy_pool, i) + val = builder.xor(pool_val, hash_const) + hash_const = builder.mul(hash_const, hash_const.type(_PHILOX_MULT_B)) + val = builder.mul(val, hash_const) + # XSHIFT sizeof(uint32) * 8 // 2 == 16 + val_sh = builder.lshr(val, val.type(16)) + val = builder.xor(val, val_sh) + key_state = builder.insert_value(key_state, val, i) + + key_state_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) + for i in range(len(key_state_ptr.type.pointee)): + key_ptr = builder.gep(key_state_ptr, [ctx.int32_ty(0), ctx.int32_ty(i)]) + key_lo = builder.extract_value(key_state, i * 2) + key_lo = builder.zext(key_lo, key_ptr.type.pointee) + key_hi = builder.extract_value(key_state, i * 2 + 1) + key_hi = builder.zext(key_hi, key_ptr.type.pointee) + key_hi = builder.shl(key_hi, key_hi.type(32)) + key = builder.or_(key_lo, key_hi) + builder.store(key, key_ptr) + + # Store used seed + used_seed_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(6)]) + builder.store(seed, used_seed_ptr) + + builder.ret_void() + + return builder.function + + +def _philox_encode(builder, rounds, value, key): + assert len(value.type) == 4 + assert len(key.type) == 2 + + for i in range(rounds): + # One round of encoding + keys = [builder.extract_value(key, j) for j in range(len(key.type))] + vals = [builder.extract_value(value, k) for k in range(len(value.type))] + lo0, hi0 = helpers.umul_lo_hi(builder, vals[0].type(0xD2E7470EE14C6C93), vals[0]) + lo1, hi1 = helpers.umul_lo_hi(builder, vals[2].type(0xCA5A826395121157), vals[2]) + + new_vals = [None] * len(vals) + new_vals[0] = builder.xor(hi1, vals[1]) + new_vals[0] = builder.xor(new_vals[0], keys[0]) + new_vals[1] = lo1 + new_vals[2] = builder.xor(hi0, vals[3]) + new_vals[2] = builder.xor(new_vals[2], keys[1]) + new_vals[3] = lo0 + for l, new_val in enumerate(new_vals): + value = builder.insert_value(value, new_val, l) + + # Now bump the key + new_key0 = builder.add(keys[0], keys[0].type(0x9E3779B97F4A7C15)) + new_key1 = builder.add(keys[1], keys[1].type(0xBB67AE8584CAA73B)) + key = builder.insert_value(key, new_key0, 0) + key = builder.insert_value(key, new_key1, 1) + + return value + + +def _setup_philox_rand_int64(ctx, state_ty): + int64_ty = ir.IntType(64) + # Generate random number generator function. 
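For orientation, the seed mixing above follows numpy's SeedSeq-style entropy pool: each word is perturbed by an evolving hash constant and xor-shifted. A rough pure-Python rendering of the _hash_mix step (32-bit arithmetic emulated with masking; constants as defined above; illustrative only):

MASK32 = 0xFFFFFFFF
_PHILOX_INIT_A = 0x43b0d7e5
_PHILOX_MULT_A = 0x931e8875

def hash_mix(value, hash_const):
    val = (value ^ hash_const) & MASK32
    hash_const = (hash_const * _PHILOX_MULT_A) & MASK32
    val = (val * hash_const) & MASK32
    val ^= val >> 16                # XSHIFT = sizeof(uint32) * 8 // 2
    return val, hash_const

# fold a couple of entropy words into the pool, threading the hash constant
h = _PHILOX_INIT_A
pool = []
for word in (0x0000002A, 0x00000000):   # e.g. low/high halves of a 64-bit seed
    mixed, h = hash_mix(word, h)
    pool.append(mixed)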
+ builder = _setup_builtin_func_builder(ctx, "philox_rand_int64", (state_ty.as_pointer(), int64_ty.as_pointer())) + state, out = builder.function.args + + counter_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0)]) + key_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) + buffer_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(2)]) + buffer_pos_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)]) + + assert buffer_pos_ptr.type.pointee.width == 16 + + # Check if there is a pre-generated value + buffer_pos = builder.load(buffer_pos_ptr) + already_generated = builder.icmp_unsigned("<", buffer_pos, buffer_pos.type(len(buffer_ptr.type.pointee))) + with builder.if_then(already_generated, likely=True): + # Get value from pre-generated buffer + val_ptr = builder.gep(buffer_ptr, [ctx.int32_ty(0), buffer_pos]) + builder.store(builder.load(val_ptr), out) + + # Update buffer position + buffer_pos = builder.add(buffer_pos, buffer_pos.type(1)) + builder.store(buffer_pos, buffer_pos_ptr) + builder.ret_void() + + + # Generate 4 new numbers + + # "counter" is 256 bit wide split into 4 64b integers. + # field i should only be incremented if all fields > 11) * (1.0 / 9007199254740992.0) + rhs = double_ty(1.0 / 9007199254740992.0) + + # Generate random integer + lhs_ptr = builder.alloca(gen_int64.args[1].type.pointee) + builder.call(gen_int64, [state, lhs_ptr]) + + # convert to float + lhs_int = builder.load(lhs_ptr) + lhs_shift = builder.lshr(lhs_int, lhs_int.type(11)) + lhs = builder.uitofp(lhs_shift, double_ty) + + res = builder.fmul(lhs, rhs) + builder.store(res, out) + + builder.ret_void() + + return builder.function + + +def _setup_philox_rand_float(ctx, state_ty, gen_int32): + # Generate random float number generator function + float_ty = ir.FloatType() + builder = _setup_builtin_func_builder(ctx, "philox_rand_float", (state_ty.as_pointer(), float_ty.as_pointer())) + state, out = builder.function.args + + # (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f); + rhs = float_ty(1.0 / 8388608.0) + + # Generate random integer + lhs_ptr = builder.alloca(gen_int32.args[1].type.pointee) + builder.call(gen_int32, [state, lhs_ptr]) + + # convert to float + lhs_int = builder.load(lhs_ptr) + lhs_shift = builder.lshr(lhs_int, lhs_int.type(9)) + lhs = builder.uitofp(lhs_shift, float_ty) + + res = builder.fmul(lhs, rhs) + builder.store(res, out) + + builder.ret_void() + + return builder.function + + +# Taken from numpy +_wi_double_data = [ + 8.68362706080130616677e-16, 4.77933017572773682428e-17, + 6.35435241740526230246e-17, 7.45487048124769627714e-17, + 8.32936681579309972857e-17, 9.06806040505948228243e-17, + 9.71486007656776183958e-17, 1.02947503142410192108e-16, + 1.08234302884476839838e-16, 1.13114701961090307945e-16, + 1.17663594570229211411e-16, 1.21936172787143633280e-16, + 1.25974399146370927864e-16, 1.29810998862640315416e-16, + 1.33472037368241227547e-16, 1.36978648425712032797e-16, + 1.40348230012423820659e-16, 1.43595294520569430270e-16, + 1.46732087423644219083e-16, 1.49769046683910367425e-16, + 1.52715150035961979750e-16, 1.55578181694607639484e-16, + 1.58364940092908853989e-16, 1.61081401752749279325e-16, + 1.63732852039698532012e-16, 1.66323990584208352778e-16, + 1.68859017086765964015e-16, 1.71341701765596607184e-16, + 1.73775443658648593310e-16, 1.76163319230009959832e-16, + 1.78508123169767272927e-16, 1.80812402857991522674e-16, + 1.83078487648267501776e-16, 1.85308513886180189386e-16, + 1.87504446393738816849e-16, 1.89668097007747596212e-16, + 
1.91801140648386198029e-16, 1.93905129306251037069e-16, + 1.95981504266288244037e-16, 1.98031606831281739736e-16, + 2.00056687762733300198e-16, 2.02057915620716538808e-16, + 2.04036384154802118313e-16, 2.05993118874037063144e-16, + 2.07929082904140197311e-16, 2.09845182223703516690e-16, + 2.11742270357603418769e-16, 2.13621152594498681022e-16, + 2.15482589785814580926e-16, 2.17327301775643674990e-16, + 2.19155970504272708519e-16, 2.20969242822353175995e-16, + 2.22767733047895534948e-16, 2.24552025294143552381e-16, + 2.26322675592856786566e-16, 2.28080213834501706782e-16, + 2.29825145544246839061e-16, 2.31557953510408037008e-16, + 2.33279099280043561128e-16, 2.34989024534709550938e-16, + 2.36688152357916037468e-16, 2.38376888404542434981e-16, + 2.40055621981350627349e-16, 2.41724727046750252175e-16, + 2.43384563137110286400e-16, 2.45035476226149539878e-16, + 2.46677799523270498158e-16, 2.48311854216108767769e-16, + 2.49937950162045242375e-16, 2.51556386532965786439e-16, + 2.53167452417135826983e-16, 2.54771427381694417303e-16, + 2.56368581998939683749e-16, 2.57959178339286723500e-16, + 2.59543470433517070146e-16, 2.61121704706701939097e-16, + 2.62694120385972564623e-16, 2.64260949884118951286e-16, + 2.65822419160830680292e-16, 2.67378748063236329361e-16, + 2.68930150647261591777e-16, 2.70476835481199518794e-16, + 2.72019005932773206655e-16, 2.73556860440867908686e-16, + 2.75090592773016664571e-16, 2.76620392269639032183e-16, + 2.78146444075954410103e-16, 2.79668929362423005309e-16, + 2.81188025534502074329e-16, 2.82703906432447923059e-16, + 2.84216742521840606520e-16, 2.85726701075460149289e-16, + 2.87233946347097994381e-16, 2.88738639737848191815e-16, + 2.90240939955384233230e-16, 2.91741003166694553259e-16, + 2.93238983144718163965e-16, 2.94735031409293489611e-16, + 2.96229297362806647792e-16, 2.97721928420902891115e-16, + 2.99213070138601307081e-16, 3.00702866332133102993e-16, + 3.02191459196806151971e-16, 3.03678989421180184427e-16, + 3.05165596297821922381e-16, 3.06651417830895451744e-16, + 3.08136590840829717032e-16, 3.09621251066292253306e-16, + 3.11105533263689296831e-16, 3.12589571304399892784e-16, + 3.14073498269944617203e-16, 3.15557446545280064031e-16, + 3.17041547910402852545e-16, 3.18525933630440648871e-16, + 3.20010734544401137886e-16, 3.21496081152744704901e-16, + 3.22982103703941557538e-16, 3.24468932280169778077e-16, + 3.25956696882307838340e-16, 3.27445527514370671802e-16, + 3.28935554267536967851e-16, 3.30426907403912838589e-16, + 3.31919717440175233652e-16, 3.33414115231237245918e-16, + 3.34910232054077845412e-16, 3.36408199691876507948e-16, + 3.37908150518594979994e-16, 3.39410217584148914282e-16, + 3.40914534700312603713e-16, 3.42421236527501816058e-16, + 3.43930458662583133920e-16, 3.45442337727858401604e-16, + 3.46957011461378353333e-16, 3.48474618808741370700e-16, + 3.49995300016538099813e-16, 3.51519196727607440975e-16, + 3.53046452078274009054e-16, 3.54577210797743572160e-16, + 3.56111619309838843415e-16, 3.57649825837265051035e-16, + 3.59191980508602994994e-16, 3.60738235468235137839e-16, + 3.62288744989419151904e-16, 3.63843665590734438546e-16, + 3.65403156156136995766e-16, 3.66967378058870090021e-16, + 3.68536495289491401456e-16, 3.70110674588289834952e-16, + 3.71690085582382297792e-16, 3.73274900927794352614e-16, + 3.74865296456848868882e-16, 3.76461451331202869131e-16, + 3.78063548200896037651e-16, 3.79671773369794425924e-16, + 3.81286316967837738238e-16, 3.82907373130524317507e-16, + 3.84535140186095955858e-16, 3.86169820850914927119e-16, + 
3.87811622433558721164e-16, 3.89460757048192620674e-16, + 3.91117441837820542060e-16, 3.92781899208054153270e-16, + 3.94454357072087711446e-16, 3.96135049107613542983e-16, + 3.97824215026468259474e-16, 3.99522100857856502444e-16, + 4.01228959246062907451e-16, 4.02945049763632792393e-16, + 4.04670639241074995115e-16, 4.06406002114225038723e-16, + 4.08151420790493873480e-16, 4.09907186035326643447e-16, + 4.11673597380302570170e-16, 4.13450963554423599878e-16, + 4.15239602940268833891e-16, 4.17039844056831587498e-16, + 4.18852026071011229572e-16, 4.20676499339901510978e-16, + 4.22513625986204937320e-16, 4.24363780509307796137e-16, + 4.26227350434779809917e-16, 4.28104737005311666397e-16, + 4.29996355916383230161e-16, 4.31902638100262944617e-16, + 4.33824030562279080411e-16, 4.35760997273684900553e-16, + 4.37714020125858747008e-16, 4.39683599951052137423e-16, + 4.41670257615420348435e-16, 4.43674535190656726604e-16, + 4.45696997211204306674e-16, 4.47738232024753387312e-16, + 4.49798853244554968009e-16, 4.51879501313005876278e-16, + 4.53980845187003400947e-16, 4.56103584156742206384e-16, + 4.58248449810956667052e-16, 4.60416208163115281428e-16, + 4.62607661954784567754e-16, 4.64823653154320737780e-16, + 4.67065065671263059081e-16, 4.69332828309332890697e-16, + 4.71627917983835129766e-16, 4.73951363232586715165e-16, + 4.76304248053313737663e-16, 4.78687716104872284247e-16, + 4.81102975314741720538e-16, 4.83551302941152515162e-16, + 4.86034051145081195402e-16, 4.88552653135360343280e-16, + 4.91108629959526955862e-16, 4.93703598024033454728e-16, + 4.96339277440398725619e-16, 4.99017501309182245754e-16, + 5.01740226071808946011e-16, 5.04509543081872748637e-16, + 5.07327691573354207058e-16, 5.10197073234156184149e-16, + 5.13120268630678373200e-16, 5.16100055774322824569e-16, + 5.19139431175769859873e-16, 5.22241633800023428760e-16, + 5.25410172417759732697e-16, 5.28648856950494511482e-16, + 5.31961834533840037535e-16, 5.35353631181649688145e-16, + 5.38829200133405320160e-16, 5.42393978220171234073e-16, + 5.46053951907478041166e-16, 5.49815735089281410703e-16, + 5.53686661246787600374e-16, 5.57674893292657647836e-16, + 5.61789555355541665830e-16, 5.66040892008242216739e-16, + 5.70440462129138908417e-16, 5.75001376891989523684e-16, + 5.79738594572459365014e-16, 5.84669289345547900201e-16, + 5.89813317647789942685e-16, 5.95193814964144415532e-16, + 6.00837969627190832234e-16, 6.06778040933344851394e-16, + 6.13052720872528159123e-16, 6.19708989458162555387e-16, + 6.26804696330128439415e-16, 6.34412240712750598627e-16, + 6.42623965954805540945e-16, 6.51560331734499356881e-16, + 6.61382788509766415145e-16, 6.72315046250558662913e-16, + 6.84680341756425875856e-16, 6.98971833638761995415e-16, + 7.15999493483066421560e-16, 7.37242430179879890722e-16, + 7.65893637080557275482e-16, 8.11384933765648418565e-16] + +# Taken from numpy +_ki_i64_data = [ + 0x000EF33D8025EF6A, 0x0000000000000000, 0x000C08BE98FBC6A8, + 0x000DA354FABD8142, 0x000E51F67EC1EEEA, 0x000EB255E9D3F77E, + 0x000EEF4B817ECAB9, 0x000F19470AFA44AA, 0x000F37ED61FFCB18, + 0x000F4F469561255C, 0x000F61A5E41BA396, 0x000F707A755396A4, + 0x000F7CB2EC28449A, 0x000F86F10C6357D3, 0x000F8FA6578325DE, + 0x000F9724C74DD0DA, 0x000F9DA907DBF509, 0x000FA360F581FA74, + 0x000FA86FDE5B4BF8, 0x000FACF160D354DC, 0x000FB0FB6718B90F, + 0x000FB49F8D5374C6, 0x000FB7EC2366FE77, 0x000FBAECE9A1E50E, + 0x000FBDAB9D040BED, 0x000FC03060FF6C57, 0x000FC2821037A248, + 0x000FC4A67AE25BD1, 0x000FC6A2977AEE31, 0x000FC87AA92896A4, + 0x000FCA325E4BDE85, 0x000FCBCCE902231A, 
0x000FCD4D12F839C4, + 0x000FCEB54D8FEC99, 0x000FD007BF1DC930, 0x000FD1464DD6C4E6, + 0x000FD272A8E2F450, 0x000FD38E4FF0C91E, 0x000FD49A9990B478, + 0x000FD598B8920F53, 0x000FD689C08E99EC, 0x000FD76EA9C8E832, + 0x000FD848547B08E8, 0x000FD9178BAD2C8C, 0x000FD9DD07A7ADD2, + 0x000FDA9970105E8C, 0x000FDB4D5DC02E20, 0x000FDBF95C5BFCD0, + 0x000FDC9DEBB99A7D, 0x000FDD3B8118729D, 0x000FDDD288342F90, + 0x000FDE6364369F64, 0x000FDEEE708D514E, 0x000FDF7401A6B42E, + 0x000FDFF46599ED40, 0x000FE06FE4BC24F2, 0x000FE0E6C225A258, + 0x000FE1593C28B84C, 0x000FE1C78CBC3F99, 0x000FE231E9DB1CAA, + 0x000FE29885DA1B91, 0x000FE2FB8FB54186, 0x000FE35B33558D4A, + 0x000FE3B799D0002A, 0x000FE410E99EAD7F, 0x000FE46746D47734, + 0x000FE4BAD34C095C, 0x000FE50BAED29524, 0x000FE559F74EBC78, + 0x000FE5A5C8E41212, 0x000FE5EF3E138689, 0x000FE6366FD91078, + 0x000FE67B75C6D578, 0x000FE6BE661E11AA, 0x000FE6FF55E5F4F2, + 0x000FE73E5900A702, 0x000FE77B823E9E39, 0x000FE7B6E37070A2, + 0x000FE7F08D774243, 0x000FE8289053F08C, 0x000FE85EFB35173A, + 0x000FE893DC840864, 0x000FE8C741F0CEBC, 0x000FE8F9387D4EF6, + 0x000FE929CC879B1D, 0x000FE95909D388EA, 0x000FE986FB939AA2, + 0x000FE9B3AC714866, 0x000FE9DF2694B6D5, 0x000FEA0973ABE67C, + 0x000FEA329CF166A4, 0x000FEA5AAB32952C, 0x000FEA81A6D5741A, + 0x000FEAA797DE1CF0, 0x000FEACC85F3D920, 0x000FEAF07865E63C, + 0x000FEB13762FEC13, 0x000FEB3585FE2A4A, 0x000FEB56AE3162B4, + 0x000FEB76F4E284FA, 0x000FEB965FE62014, 0x000FEBB4F4CF9D7C, + 0x000FEBD2B8F449D0, 0x000FEBEFB16E2E3E, 0x000FEC0BE31EBDE8, + 0x000FEC2752B15A15, 0x000FEC42049DAFD3, 0x000FEC5BFD29F196, + 0x000FEC75406CEEF4, 0x000FEC8DD2500CB4, 0x000FECA5B6911F12, + 0x000FECBCF0C427FE, 0x000FECD38454FB15, 0x000FECE97488C8B3, + 0x000FECFEC47F91B7, 0x000FED1377358528, 0x000FED278F844903, + 0x000FED3B10242F4C, 0x000FED4DFBAD586E, 0x000FED605498C3DD, + 0x000FED721D414FE8, 0x000FED8357E4A982, 0x000FED9406A42CC8, + 0x000FEDA42B85B704, 0x000FEDB3C8746AB4, 0x000FEDC2DF416652, + 0x000FEDD171A46E52, 0x000FEDDF813C8AD3, 0x000FEDED0F909980, + 0x000FEDFA1E0FD414, 0x000FEE06AE124BC4, 0x000FEE12C0D95A06, + 0x000FEE1E579006E0, 0x000FEE29734B6524, 0x000FEE34150AE4BC, + 0x000FEE3E3DB89B3C, 0x000FEE47EE2982F4, 0x000FEE51271DB086, + 0x000FEE59E9407F41, 0x000FEE623528B42E, 0x000FEE6A0B5897F1, + 0x000FEE716C3E077A, 0x000FEE7858327B82, 0x000FEE7ECF7B06BA, + 0x000FEE84D2484AB2, 0x000FEE8A60B66343, 0x000FEE8F7ACCC851, + 0x000FEE94207E25DA, 0x000FEE9851A829EA, 0x000FEE9C0E13485C, + 0x000FEE9F557273F4, 0x000FEEA22762CCAE, 0x000FEEA4836B42AC, + 0x000FEEA668FC2D71, 0x000FEEA7D76ED6FA, 0x000FEEA8CE04FA0A, + 0x000FEEA94BE8333B, 0x000FEEA950296410, 0x000FEEA8D9C0075E, + 0x000FEEA7E7897654, 0x000FEEA678481D24, 0x000FEEA48AA29E83, + 0x000FEEA21D22E4DA, 0x000FEE9F2E352024, 0x000FEE9BBC26AF2E, + 0x000FEE97C524F2E4, 0x000FEE93473C0A3A, 0x000FEE8E40557516, + 0x000FEE88AE369C7A, 0x000FEE828E7F3DFD, 0x000FEE7BDEA7B888, + 0x000FEE749BFF37FF, 0x000FEE6CC3A9BD5E, 0x000FEE64529E007E, + 0x000FEE5B45A32888, 0x000FEE51994E57B6, 0x000FEE474A0006CF, + 0x000FEE3C53E12C50, 0x000FEE30B2E02AD8, 0x000FEE2462AD8205, + 0x000FEE175EB83C5A, 0x000FEE09A22A1447, 0x000FEDFB27E349CC, + 0x000FEDEBEA76216C, 0x000FEDDBE422047E, 0x000FEDCB0ECE39D3, + 0x000FEDB964042CF4, 0x000FEDA6DCE938C9, 0x000FED937237E98D, + 0x000FED7F1C38A836, 0x000FED69D2B9C02B, 0x000FED538D06AE00, + 0x000FED3C41DEA422, 0x000FED23E76A2FD8, 0x000FED0A732FE644, + 0x000FECEFDA07FE34, 0x000FECD4100EB7B8, 0x000FECB708956EB4, + 0x000FEC98B61230C1, 0x000FEC790A0DA978, 0x000FEC57F50F31FE, + 0x000FEC356686C962, 0x000FEC114CB4B335, 0x000FEBEB948E6FD0, 
+ 0x000FEBC429A0B692, 0x000FEB9AF5EE0CDC, 0x000FEB6FE1C98542, + 0x000FEB42D3AD1F9E, 0x000FEB13B00B2D4B, 0x000FEAE2591A02E9, + 0x000FEAAEAE992257, 0x000FEA788D8EE326, 0x000FEA3FCFFD73E5, + 0x000FEA044C8DD9F6, 0x000FE9C5D62F563B, 0x000FE9843BA947A4, + 0x000FE93F471D4728, 0x000FE8F6BD76C5D6, 0x000FE8AA5DC4E8E6, + 0x000FE859E07AB1EA, 0x000FE804F690A940, 0x000FE7AB488233C0, + 0x000FE74C751F6AA5, 0x000FE6E8102AA202, 0x000FE67DA0B6ABD8, + 0x000FE60C9F38307E, 0x000FE5947338F742, 0x000FE51470977280, + 0x000FE48BD436F458, 0x000FE3F9BFFD1E37, 0x000FE35D35EEB19C, + 0x000FE2B5122FE4FE, 0x000FE20003995557, 0x000FE13C82788314, + 0x000FE068C4EE67B0, 0x000FDF82B02B71AA, 0x000FDE87C57EFEAA, + 0x000FDD7509C63BFD, 0x000FDC46E529BF13, 0x000FDAF8F82E0282, + 0x000FD985E1B2BA75, 0x000FD7E6EF48CF04, 0x000FD613ADBD650B, + 0x000FD40149E2F012, 0x000FD1A1A7B4C7AC, 0x000FCEE204761F9E, + 0x000FCBA8D85E11B2, 0x000FC7D26ECD2D22, 0x000FC32B2F1E22ED, + 0x000FBD6581C0B83A, 0x000FB606C4005434, 0x000FAC40582A2874, + 0x000F9E971E014598, 0x000F89FA48A41DFC, 0x000F66C5F7F0302C, + 0x000F1A5A4B331C4A] + +# Taken from numpy +_fi_double_data = [ + 1.00000000000000000000e+00, 9.77101701267671596263e-01, + 9.59879091800106665211e-01, 9.45198953442299649730e-01, + 9.32060075959230460718e-01, 9.19991505039347012840e-01, + 9.08726440052130879366e-01, 8.98095921898343418910e-01, + 8.87984660755833377088e-01, 8.78309655808917399966e-01, + 8.69008688036857046555e-01, 8.60033621196331532488e-01, + 8.51346258458677951353e-01, 8.42915653112204177333e-01, + 8.34716292986883434679e-01, 8.26726833946221373317e-01, + 8.18929191603702366642e-01, 8.11307874312656274185e-01, + 8.03849483170964274059e-01, 7.96542330422958966274e-01, + 7.89376143566024590648e-01, 7.82341832654802504798e-01, + 7.75431304981187174974e-01, 7.68637315798486264740e-01, + 7.61953346836795386565e-01, 7.55373506507096115214e-01, + 7.48892447219156820459e-01, 7.42505296340151055290e-01, + 7.36207598126862650112e-01, 7.29995264561476231435e-01, + 7.23864533468630222401e-01, 7.17811932630721960535e-01, + 7.11834248878248421200e-01, 7.05928501332754310127e-01, + 7.00091918136511615067e-01, 6.94321916126116711609e-01, + 6.88616083004671808432e-01, 6.82972161644994857355e-01, + 6.77388036218773526009e-01, 6.71861719897082099173e-01, + 6.66391343908750100056e-01, 6.60975147776663107813e-01, + 6.55611470579697264149e-01, 6.50298743110816701574e-01, + 6.45035480820822293424e-01, 6.39820277453056585060e-01, + 6.34651799287623608059e-01, 6.29528779924836690007e-01, + 6.24450015547026504592e-01, 6.19414360605834324325e-01, + 6.14420723888913888899e-01, 6.09468064925773433949e-01, + 6.04555390697467776029e-01, 5.99681752619125263415e-01, + 5.94846243767987448159e-01, 5.90047996332826008015e-01, + 5.85286179263371453274e-01, 5.80559996100790898232e-01, + 5.75868682972353718164e-01, 5.71211506735253227163e-01, + 5.66587763256164445025e-01, 5.61996775814524340831e-01, + 5.57437893618765945014e-01, 5.52910490425832290562e-01, + 5.48413963255265812791e-01, 5.43947731190026262382e-01, + 5.39511234256952132426e-01, 5.35103932380457614215e-01, + 5.30725304403662057062e-01, 5.26374847171684479008e-01, + 5.22052074672321841931e-01, 5.17756517229756352272e-01, + 5.13487720747326958914e-01, 5.09245245995747941592e-01, + 5.05028667943468123624e-01, 5.00837575126148681903e-01, + 4.96671569052489714213e-01, 4.92530263643868537748e-01, + 4.88413284705458028423e-01, 4.84320269426683325253e-01, + 4.80250865909046753544e-01, 4.76204732719505863248e-01, + 4.72181538467730199660e-01, 4.68180961405693596422e-01, 
+ 4.64202689048174355069e-01, 4.60246417812842867345e-01, + 4.56311852678716434184e-01, 4.52398706861848520777e-01, + 4.48506701507203064949e-01, 4.44635565395739396077e-01, + 4.40785034665803987508e-01, 4.36954852547985550526e-01, + 4.33144769112652261445e-01, 4.29354541029441427735e-01, + 4.25583931338021970170e-01, 4.21832709229495894654e-01, + 4.18100649837848226120e-01, 4.14387534040891125642e-01, + 4.10693148270188157500e-01, 4.07017284329473372217e-01, + 4.03359739221114510510e-01, 3.99720314980197222177e-01, + 3.96098818515832451492e-01, 3.92495061459315619512e-01, + 3.88908860018788715696e-01, 3.85340034840077283462e-01, + 3.81788410873393657674e-01, 3.78253817245619183840e-01, + 3.74736087137891138443e-01, 3.71235057668239498696e-01, + 3.67750569779032587814e-01, 3.64282468129004055601e-01, + 3.60830600989648031529e-01, 3.57394820145780500731e-01, + 3.53974980800076777232e-01, 3.50570941481406106455e-01, + 3.47182563956793643900e-01, 3.43809713146850715049e-01, + 3.40452257044521866547e-01, 3.37110066637006045021e-01, + 3.33783015830718454708e-01, 3.30470981379163586400e-01, + 3.27173842813601400970e-01, 3.23891482376391093290e-01, + 3.20623784956905355514e-01, 3.17370638029913609834e-01, + 3.14131931596337177215e-01, 3.10907558126286509559e-01, + 3.07697412504292056035e-01, 3.04501391976649993243e-01, + 3.01319396100803049698e-01, 2.98151326696685481377e-01, + 2.94997087799961810184e-01, 2.91856585617095209972e-01, + 2.88729728482182923521e-01, 2.85616426815501756042e-01, + 2.82516593083707578948e-01, 2.79430141761637940157e-01, + 2.76356989295668320494e-01, 2.73297054068577072172e-01, + 2.70250256365875463072e-01, 2.67216518343561471038e-01, + 2.64195763997261190426e-01, 2.61187919132721213522e-01, + 2.58192911337619235290e-01, 2.55210669954661961700e-01, + 2.52241126055942177508e-01, 2.49284212418528522415e-01, + 2.46339863501263828249e-01, 2.43408015422750312329e-01, + 2.40488605940500588254e-01, 2.37581574431238090606e-01, + 2.34686861872330010392e-01, 2.31804410824338724684e-01, + 2.28934165414680340644e-01, 2.26076071322380278694e-01, + 2.23230075763917484855e-01, 2.20396127480151998723e-01, + 2.17574176724331130872e-01, 2.14764175251173583536e-01, + 2.11966076307030182324e-01, 2.09179834621125076977e-01, + 2.06405406397880797353e-01, 2.03642749310334908452e-01, + 2.00891822494656591136e-01, 1.98152586545775138971e-01, + 1.95425003514134304483e-01, 1.92709036903589175926e-01, + 1.90004651670464985713e-01, 1.87311814223800304768e-01, + 1.84630492426799269756e-01, 1.81960655599522513892e-01, + 1.79302274522847582272e-01, 1.76655321443734858455e-01, + 1.74019770081838553999e-01, 1.71395595637505754327e-01, + 1.68782774801211288285e-01, 1.66181285764481906364e-01, + 1.63591108232365584074e-01, 1.61012223437511009516e-01, + 1.58444614155924284882e-01, 1.55888264724479197465e-01, + 1.53343161060262855866e-01, 1.50809290681845675763e-01, + 1.48286642732574552861e-01, 1.45775208005994028060e-01, + 1.43274978973513461566e-01, 1.40785949814444699690e-01, + 1.38308116448550733057e-01, 1.35841476571253755301e-01, + 1.33386029691669155683e-01, 1.30941777173644358090e-01, + 1.28508722279999570981e-01, 1.26086870220185887081e-01, + 1.23676228201596571932e-01, 1.21276805484790306533e-01, + 1.18888613442910059947e-01, 1.16511665625610869035e-01, + 1.14145977827838487895e-01, 1.11791568163838089811e-01, + 1.09448457146811797824e-01, 1.07116667774683801961e-01, + 1.04796225622487068629e-01, 1.02487158941935246892e-01, + 1.00189498768810017482e-01, 9.79032790388624646338e-02, + 
9.56285367130089991594e-02, 9.33653119126910124859e-02, + 9.11136480663737591268e-02, 8.88735920682758862021e-02, + 8.66451944505580717859e-02, 8.44285095703534715916e-02, + 8.22235958132029043366e-02, 8.00305158146630696292e-02, + 7.78493367020961224423e-02, 7.56801303589271778804e-02, + 7.35229737139813238622e-02, 7.13779490588904025339e-02, + 6.92451443970067553879e-02, 6.71246538277884968737e-02, + 6.50165779712428976156e-02, 6.29210244377581412456e-02, + 6.08381083495398780614e-02, 5.87679529209337372930e-02, + 5.67106901062029017391e-02, 5.46664613248889208474e-02, + 5.26354182767921896513e-02, 5.06177238609477817000e-02, + 4.86135532158685421122e-02, 4.66230949019303814174e-02, + 4.46465522512944634759e-02, 4.26841449164744590750e-02, + 4.07361106559409394401e-02, 3.88027074045261474722e-02, + 3.68842156885673053135e-02, 3.49809414617161251737e-02, + 3.30932194585785779961e-02, 3.12214171919203004046e-02, + 2.93659397581333588001e-02, 2.75272356696031131329e-02, + 2.57058040085489103443e-02, 2.39022033057958785407e-02, + 2.21170627073088502113e-02, 2.03510962300445102935e-02, + 1.86051212757246224594e-02, 1.68800831525431419000e-02, + 1.51770883079353092332e-02, 1.34974506017398673818e-02, + 1.18427578579078790488e-02, 1.02149714397014590439e-02, + 8.61658276939872638800e-03, 7.05087547137322242369e-03, + 5.52240329925099155545e-03, 4.03797259336302356153e-03, + 2.60907274610215926189e-03, 1.26028593049859797236e-03] + +# Taken from numpy +_ki_i32_data = [ + 0x007799EC, 0x00000000, 0x006045F5, 0x006D1AA8, 0x00728FB4, + 0x007592AF, 0x00777A5C, 0x0078CA38, 0x0079BF6B, 0x007A7A35, + 0x007B0D2F, 0x007B83D4, 0x007BE597, 0x007C3788, 0x007C7D33, + 0x007CB926, 0x007CED48, 0x007D1B08, 0x007D437F, 0x007D678B, + 0x007D87DB, 0x007DA4FC, 0x007DBF61, 0x007DD767, 0x007DED5D, + 0x007E0183, 0x007E1411, 0x007E2534, 0x007E3515, 0x007E43D5, + 0x007E5193, 0x007E5E67, 0x007E6A69, 0x007E75AA, 0x007E803E, + 0x007E8A32, 0x007E9395, 0x007E9C72, 0x007EA4D5, 0x007EACC6, + 0x007EB44E, 0x007EBB75, 0x007EC243, 0x007EC8BC, 0x007ECEE8, + 0x007ED4CC, 0x007EDA6B, 0x007EDFCB, 0x007EE4EF, 0x007EE9DC, + 0x007EEE94, 0x007EF31B, 0x007EF774, 0x007EFBA0, 0x007EFFA3, + 0x007F037F, 0x007F0736, 0x007F0ACA, 0x007F0E3C, 0x007F118F, + 0x007F14C4, 0x007F17DC, 0x007F1ADA, 0x007F1DBD, 0x007F2087, + 0x007F233A, 0x007F25D7, 0x007F285D, 0x007F2AD0, 0x007F2D2E, + 0x007F2F7A, 0x007F31B3, 0x007F33DC, 0x007F35F3, 0x007F37FB, + 0x007F39F3, 0x007F3BDC, 0x007F3DB7, 0x007F3F84, 0x007F4145, + 0x007F42F8, 0x007F449F, 0x007F463A, 0x007F47CA, 0x007F494E, + 0x007F4AC8, 0x007F4C38, 0x007F4D9D, 0x007F4EF9, 0x007F504C, + 0x007F5195, 0x007F52D5, 0x007F540D, 0x007F553D, 0x007F5664, + 0x007F5784, 0x007F589C, 0x007F59AC, 0x007F5AB5, 0x007F5BB8, + 0x007F5CB3, 0x007F5DA8, 0x007F5E96, 0x007F5F7E, 0x007F605F, + 0x007F613B, 0x007F6210, 0x007F62E0, 0x007F63AA, 0x007F646F, + 0x007F652E, 0x007F65E8, 0x007F669C, 0x007F674C, 0x007F67F6, + 0x007F689C, 0x007F693C, 0x007F69D9, 0x007F6A70, 0x007F6B03, + 0x007F6B91, 0x007F6C1B, 0x007F6CA0, 0x007F6D21, 0x007F6D9E, + 0x007F6E17, 0x007F6E8C, 0x007F6EFC, 0x007F6F68, 0x007F6FD1, + 0x007F7035, 0x007F7096, 0x007F70F3, 0x007F714C, 0x007F71A1, + 0x007F71F2, 0x007F723F, 0x007F7289, 0x007F72CF, 0x007F7312, + 0x007F7350, 0x007F738B, 0x007F73C3, 0x007F73F6, 0x007F7427, + 0x007F7453, 0x007F747C, 0x007F74A1, 0x007F74C3, 0x007F74E0, + 0x007F74FB, 0x007F7511, 0x007F7524, 0x007F7533, 0x007F753F, + 0x007F7546, 0x007F754A, 0x007F754B, 0x007F7547, 0x007F753F, + 0x007F7534, 0x007F7524, 0x007F7511, 0x007F74F9, 0x007F74DE, + 0x007F74BE, 
0x007F749A, 0x007F7472, 0x007F7445, 0x007F7414, + 0x007F73DF, 0x007F73A5, 0x007F7366, 0x007F7323, 0x007F72DA, + 0x007F728D, 0x007F723A, 0x007F71E3, 0x007F7186, 0x007F7123, + 0x007F70BB, 0x007F704D, 0x007F6FD9, 0x007F6F5F, 0x007F6EDF, + 0x007F6E58, 0x007F6DCB, 0x007F6D37, 0x007F6C9C, 0x007F6BF9, + 0x007F6B4F, 0x007F6A9C, 0x007F69E2, 0x007F691F, 0x007F6854, + 0x007F677F, 0x007F66A1, 0x007F65B8, 0x007F64C6, 0x007F63C8, + 0x007F62C0, 0x007F61AB, 0x007F608A, 0x007F5F5D, 0x007F5E21, + 0x007F5CD8, 0x007F5B7F, 0x007F5A17, 0x007F589E, 0x007F5713, + 0x007F5575, 0x007F53C4, 0x007F51FE, 0x007F5022, 0x007F4E2F, + 0x007F4C22, 0x007F49FA, 0x007F47B6, 0x007F4553, 0x007F42CF, + 0x007F4028, 0x007F3D5A, 0x007F3A64, 0x007F3741, 0x007F33ED, + 0x007F3065, 0x007F2CA4, 0x007F28A4, 0x007F245F, 0x007F1FCE, + 0x007F1AEA, 0x007F15A9, 0x007F1000, 0x007F09E4, 0x007F0346, + 0x007EFC16, 0x007EF43E, 0x007EEBA8, 0x007EE237, 0x007ED7C8, + 0x007ECC2F, 0x007EBF37, 0x007EB09D, 0x007EA00A, 0x007E8D0D, + 0x007E7710, 0x007E5D47, 0x007E3E93, 0x007E1959, 0x007DEB2C, + 0x007DB036, 0x007D6203, 0x007CF4B9, 0x007C4FD2, 0x007B3630, + 0x0078D2D2] + +# Taken from numpy +_wi_float_data = [ + 4.66198677960027669255e-07, 2.56588335019207033255e-08, + 3.41146697750176784592e-08, 4.00230311410932959821e-08, + 4.47179475877737745459e-08, 4.86837785973537366722e-08, + 5.21562578925932412861e-08, 5.52695199001886257153e-08, + 5.81078488992733116465e-08, 6.07279932024587421409e-08, + 6.31701613261172047795e-08, 6.54639842900233842742e-08, + 6.76319905583641815324e-08, 6.96917493470166688656e-08, + 7.16572544283857476692e-08, 7.35398519048393832969e-08, + 7.53488822443557479279e-08, 7.70921367281667127885e-08, + 7.87761895947956022626e-08, 8.04066446825615346857e-08, + 8.19883218760237408659e-08, 8.35254002936857088917e-08, + 8.50215298165053411740e-08, 8.64799190652369040985e-08, + 8.79034055989140110861e-08, 8.92945125124233511541e-08, + 9.06554945027956262312e-08, 9.19883756905278607229e-08, + 9.32949809202232869780e-08, 9.45769618559625849039e-08, + 9.58358188855612866442e-08, 9.70729196232813152662e-08, + 9.82895146313061088986e-08, 9.94867508514382224721e-08, + 1.00665683139461669691e-07, 1.01827284217853923044e-07, + 1.02972453302539369464e-07, 1.04102023612124921572e-07, + 1.05216768930574060431e-07, 1.06317409364335657741e-07, + 1.07404616410877866490e-07, 1.08479017436113134283e-07, + 1.09541199642370962438e-07, 1.10591713595628691212e-07, + 1.11631076370069356306e-07, 1.12659774359245895023e-07, + 1.13678265795837113569e-07, 1.14686983015899673063e-07, + 1.15686334498432158725e-07, 1.16676706706789039179e-07, + 1.17658465754873988919e-07, 1.18631958917986203582e-07, + 1.19597516005596215528e-07, 1.20555450611113917226e-07, + 1.21506061251817163689e-07, 1.22449632410483948386e-07, + 1.23386435488872536840e-07, 1.24316729681986364321e-07, + 1.25240762781015530062e-07, 1.26158771911939892267e-07, + 1.27070984215989333455e-07, 1.27977617477468922011e-07, + 1.28878880703854958297e-07, 1.29774974662539874521e-07, + 1.30666092378141980504e-07, 1.31552419593887221722e-07, + 1.32434135200211397569e-07, 1.33311411633413359243e-07, + 1.34184415246907777059e-07, 1.35053306657377859830e-07, + 1.35918241067904315860e-07, 1.36779368569952053923e-07, + 1.37636834425917531047e-07, 1.38490779333783508675e-07, + 1.39341339675287344817e-07, 1.40188647748881762555e-07, + 1.41032831988654882776e-07, 1.41874017170273235693e-07, + 1.42712324604921442006e-07, 1.43547872322127921816e-07, + 1.44380775242292721080e-07, 1.45211145339665544509e-07, + 
1.46039091796461362146e-07, 1.46864721148745476208e-07, + 1.47688137424670065700e-07, 1.48509442275598857119e-07, + 1.49328735100614641423e-07, 1.50146113164867617390e-07, + 1.50961671712187416111e-07, 1.51775504072350982845e-07, + 1.52587701763369746341e-07, 1.53398354589133671168e-07, + 1.54207550732725568797e-07, 1.55015376845697999657e-07, + 1.55821918133584372604e-07, 1.56627258437898192833e-07, + 1.57431480314857468671e-07, 1.58234665111056041043e-07, + 1.59036893036289199880e-07, 1.59838243233728855017e-07, + 1.60638793847630850137e-07, 1.61438622088746393909e-07, + 1.62237804297600106296e-07, 1.63036416005787357730e-07, + 1.63834531995435479082e-07, 1.64632226356965902954e-07, + 1.65429572545287097020e-07, 1.66226643434541294491e-07, + 1.67023511371523209274e-07, 1.67820248227882200051e-07, + 1.68616925451215588827e-07, 1.69413614115155757272e-07, + 1.70210384968549673733e-07, 1.71007308483826142122e-07, + 1.71804454904642543391e-07, 1.72601894292900061024e-07, + 1.73399696575213681990e-07, 1.74197931588920988271e-07, + 1.74996669127712165834e-07, 1.75795978986961275677e-07, + 1.76595931008838063924e-07, 1.77396595127278238022e-07, + 1.78198041412889183130e-07, 1.79000340117867431104e-07, + 1.79803561721004406185e-07, 1.80607776972855859813e-07, + 1.81413056941151359868e-07, 1.82219473056520464354e-07, + 1.83027097158612474240e-07, 1.83836001542687613069e-07, + 1.84646259006759307383e-07, 1.85457942899367347876e-07, + 1.86271127168064649331e-07, 1.87085886408701333260e-07, + 1.87902295915592424729e-07, 1.88720431732658022414e-07, + 1.89540370705627262627e-07, 1.90362190535400839128e-07, + 1.91185969832669990437e-07, 1.92011788173893651535e-07, + 1.92839726158739913768e-07, 1.93669865469102145482e-07, + 1.94502288929804890433e-07, 1.95337080571120616772e-07, + 1.96174325693223683314e-07, 1.97014110932714374919e-07, + 1.97856524331352952716e-07, 1.98701655407150388211e-07, + 1.99549595227971635348e-07, 2.00400436487814600236e-07, + 2.01254273585938820883e-07, 2.02111202709026498408e-07, + 2.02971321916571014951e-07, 2.03834731229698846698e-07, + 2.04701532723644121196e-07, 2.05571830624108885378e-07, + 2.06445731407757185541e-07, 2.07323343907107312957e-07, + 2.08204779420104330037e-07, 2.09090151824673600213e-07, + 2.09979577698577670508e-07, 2.10873176444920111011e-07, + 2.11771070423665379388e-07, 2.12673385089569268965e-07, + 2.13580249136944118603e-07, 2.14491794651713402832e-07, + 2.15408157271244625533e-07, 2.16329476352486921685e-07, + 2.17255895148978920488e-07, 2.18187560997337924713e-07, + 2.19124625513888206785e-07, 2.20067244802139479285e-07, + 2.21015579671883851683e-07, 2.21969795870742159701e-07, + 2.22930064329060010376e-07, 2.23896561419128954210e-07, + 2.24869469229791575583e-07, 2.25848975857580322189e-07, + 2.26835275715640744118e-07, 2.27828569861799901001e-07, + 2.28829066347263833069e-07, 2.29836980587561823183e-07, + 2.30852535757505260518e-07, 2.31875963212094114516e-07, + 2.32907502935486642699e-07, 2.33947404020352726160e-07, + 2.34995925180156140289e-07, 2.36053335297164516378e-07, + 2.37119914009265667728e-07, 2.38195952338983970691e-07, + 2.39281753368440712742e-07, 2.40377632964396957621e-07, + 2.41483920557958384709e-07, 2.42600959984018662258e-07, + 2.43729110386077326413e-07, 2.44868747192698939290e-07, + 2.46020263172594533433e-07, 2.47184069576113545901e-07, + 2.48360597371852893654e-07, 2.49550298588131851232e-07, + 2.50753647770270890721e-07, 2.51971143565970967140e-07, + 2.53203310452642767375e-07, 2.54450700622322097890e-07, + 
2.55713896041856770961e-07, 2.56993510708419870887e-07, + 2.58290193123138874550e-07, 2.59604629008804833146e-07, + 2.60937544301314385690e-07, 2.62289708448800566945e-07, + 2.63661938057441759882e-07, 2.65055100928844238758e-07, + 2.66470120540847889467e-07, 2.67907981031821866252e-07, + 2.69369732758258246335e-07, 2.70856498507068313229e-07, + 2.72369480457841388042e-07, 2.73909968006952220135e-07, + 2.75479346585437289399e-07, 2.77079107626811561009e-07, + 2.78710859870496796972e-07, 2.80376342222588603820e-07, + 2.82077438439999912690e-07, 2.83816193958769527230e-07, + 2.85594835255375795814e-07, 2.87415792215003905739e-07, + 2.89281724087851835900e-07, 2.91195549750371467233e-07, + 2.93160483161771875581e-07, 2.95180075129332912389e-07, + 2.97258262785797916083e-07, 2.99399428561531794298e-07, + 3.01608470935804138388e-07, 3.03890889921758510417e-07, + 3.06252891144972267537e-07, 3.08701513613258141075e-07, + 3.11244787989714509378e-07, 3.13891934589336184321e-07, + 3.16653613755314681314e-07, 3.19542246256559459667e-07, + 3.22572428717978242099e-07, 3.25761480217458181578e-07, + 3.29130173358915628534e-07, 3.32703730345002116955e-07, + 3.36513208964639108346e-07, 3.40597478255417943913e-07, + 3.45006114675213401550e-07, 3.49803789521323211592e-07, + 3.55077180848341416206e-07, 3.60946392031859609868e-07, + 3.67584959507244041831e-07, 3.75257645787954431030e-07, + 3.84399301057791926300e-07, 3.95804015855768440983e-07, + 4.11186015434435801956e-07, 4.35608969373823260746e-07] + +# Taken from numpy +_fi_float_data = [ + 1.00000000000000000000e+00, 9.77101701267671596263e-01, + 9.59879091800106665211e-01, 9.45198953442299649730e-01, + 9.32060075959230460718e-01, 9.19991505039347012840e-01, + 9.08726440052130879366e-01, 8.98095921898343418910e-01, + 8.87984660755833377088e-01, 8.78309655808917399966e-01, + 8.69008688036857046555e-01, 8.60033621196331532488e-01, + 8.51346258458677951353e-01, 8.42915653112204177333e-01, + 8.34716292986883434679e-01, 8.26726833946221373317e-01, + 8.18929191603702366642e-01, 8.11307874312656274185e-01, + 8.03849483170964274059e-01, 7.96542330422958966274e-01, + 7.89376143566024590648e-01, 7.82341832654802504798e-01, + 7.75431304981187174974e-01, 7.68637315798486264740e-01, + 7.61953346836795386565e-01, 7.55373506507096115214e-01, + 7.48892447219156820459e-01, 7.42505296340151055290e-01, + 7.36207598126862650112e-01, 7.29995264561476231435e-01, + 7.23864533468630222401e-01, 7.17811932630721960535e-01, + 7.11834248878248421200e-01, 7.05928501332754310127e-01, + 7.00091918136511615067e-01, 6.94321916126116711609e-01, + 6.88616083004671808432e-01, 6.82972161644994857355e-01, + 6.77388036218773526009e-01, 6.71861719897082099173e-01, + 6.66391343908750100056e-01, 6.60975147776663107813e-01, + 6.55611470579697264149e-01, 6.50298743110816701574e-01, + 6.45035480820822293424e-01, 6.39820277453056585060e-01, + 6.34651799287623608059e-01, 6.29528779924836690007e-01, + 6.24450015547026504592e-01, 6.19414360605834324325e-01, + 6.14420723888913888899e-01, 6.09468064925773433949e-01, + 6.04555390697467776029e-01, 5.99681752619125263415e-01, + 5.94846243767987448159e-01, 5.90047996332826008015e-01, + 5.85286179263371453274e-01, 5.80559996100790898232e-01, + 5.75868682972353718164e-01, 5.71211506735253227163e-01, + 5.66587763256164445025e-01, 5.61996775814524340831e-01, + 5.57437893618765945014e-01, 5.52910490425832290562e-01, + 5.48413963255265812791e-01, 5.43947731190026262382e-01, + 5.39511234256952132426e-01, 5.35103932380457614215e-01, + 5.30725304403662057062e-01, 
5.26374847171684479008e-01, + 5.22052074672321841931e-01, 5.17756517229756352272e-01, + 5.13487720747326958914e-01, 5.09245245995747941592e-01, + 5.05028667943468123624e-01, 5.00837575126148681903e-01, + 4.96671569052489714213e-01, 4.92530263643868537748e-01, + 4.88413284705458028423e-01, 4.84320269426683325253e-01, + 4.80250865909046753544e-01, 4.76204732719505863248e-01, + 4.72181538467730199660e-01, 4.68180961405693596422e-01, + 4.64202689048174355069e-01, 4.60246417812842867345e-01, + 4.56311852678716434184e-01, 4.52398706861848520777e-01, + 4.48506701507203064949e-01, 4.44635565395739396077e-01, + 4.40785034665803987508e-01, 4.36954852547985550526e-01, + 4.33144769112652261445e-01, 4.29354541029441427735e-01, + 4.25583931338021970170e-01, 4.21832709229495894654e-01, + 4.18100649837848226120e-01, 4.14387534040891125642e-01, + 4.10693148270188157500e-01, 4.07017284329473372217e-01, + 4.03359739221114510510e-01, 3.99720314980197222177e-01, + 3.96098818515832451492e-01, 3.92495061459315619512e-01, + 3.88908860018788715696e-01, 3.85340034840077283462e-01, + 3.81788410873393657674e-01, 3.78253817245619183840e-01, + 3.74736087137891138443e-01, 3.71235057668239498696e-01, + 3.67750569779032587814e-01, 3.64282468129004055601e-01, + 3.60830600989648031529e-01, 3.57394820145780500731e-01, + 3.53974980800076777232e-01, 3.50570941481406106455e-01, + 3.47182563956793643900e-01, 3.43809713146850715049e-01, + 3.40452257044521866547e-01, 3.37110066637006045021e-01, + 3.33783015830718454708e-01, 3.30470981379163586400e-01, + 3.27173842813601400970e-01, 3.23891482376391093290e-01, + 3.20623784956905355514e-01, 3.17370638029913609834e-01, + 3.14131931596337177215e-01, 3.10907558126286509559e-01, + 3.07697412504292056035e-01, 3.04501391976649993243e-01, + 3.01319396100803049698e-01, 2.98151326696685481377e-01, + 2.94997087799961810184e-01, 2.91856585617095209972e-01, + 2.88729728482182923521e-01, 2.85616426815501756042e-01, + 2.82516593083707578948e-01, 2.79430141761637940157e-01, + 2.76356989295668320494e-01, 2.73297054068577072172e-01, + 2.70250256365875463072e-01, 2.67216518343561471038e-01, + 2.64195763997261190426e-01, 2.61187919132721213522e-01, + 2.58192911337619235290e-01, 2.55210669954661961700e-01, + 2.52241126055942177508e-01, 2.49284212418528522415e-01, + 2.46339863501263828249e-01, 2.43408015422750312329e-01, + 2.40488605940500588254e-01, 2.37581574431238090606e-01, + 2.34686861872330010392e-01, 2.31804410824338724684e-01, + 2.28934165414680340644e-01, 2.26076071322380278694e-01, + 2.23230075763917484855e-01, 2.20396127480151998723e-01, + 2.17574176724331130872e-01, 2.14764175251173583536e-01, + 2.11966076307030182324e-01, 2.09179834621125076977e-01, + 2.06405406397880797353e-01, 2.03642749310334908452e-01, + 2.00891822494656591136e-01, 1.98152586545775138971e-01, + 1.95425003514134304483e-01, 1.92709036903589175926e-01, + 1.90004651670464985713e-01, 1.87311814223800304768e-01, + 1.84630492426799269756e-01, 1.81960655599522513892e-01, + 1.79302274522847582272e-01, 1.76655321443734858455e-01, + 1.74019770081838553999e-01, 1.71395595637505754327e-01, + 1.68782774801211288285e-01, 1.66181285764481906364e-01, + 1.63591108232365584074e-01, 1.61012223437511009516e-01, + 1.58444614155924284882e-01, 1.55888264724479197465e-01, + 1.53343161060262855866e-01, 1.50809290681845675763e-01, + 1.48286642732574552861e-01, 1.45775208005994028060e-01, + 1.43274978973513461566e-01, 1.40785949814444699690e-01, + 1.38308116448550733057e-01, 1.35841476571253755301e-01, + 1.33386029691669155683e-01, 
1.30941777173644358090e-01, + 1.28508722279999570981e-01, 1.26086870220185887081e-01, + 1.23676228201596571932e-01, 1.21276805484790306533e-01, + 1.18888613442910059947e-01, 1.16511665625610869035e-01, + 1.14145977827838487895e-01, 1.11791568163838089811e-01, + 1.09448457146811797824e-01, 1.07116667774683801961e-01, + 1.04796225622487068629e-01, 1.02487158941935246892e-01, + 1.00189498768810017482e-01, 9.79032790388624646338e-02, + 9.56285367130089991594e-02, 9.33653119126910124859e-02, + 9.11136480663737591268e-02, 8.88735920682758862021e-02, + 8.66451944505580717859e-02, 8.44285095703534715916e-02, + 8.22235958132029043366e-02, 8.00305158146630696292e-02, + 7.78493367020961224423e-02, 7.56801303589271778804e-02, + 7.35229737139813238622e-02, 7.13779490588904025339e-02, + 6.92451443970067553879e-02, 6.71246538277884968737e-02, + 6.50165779712428976156e-02, 6.29210244377581412456e-02, + 6.08381083495398780614e-02, 5.87679529209337372930e-02, + 5.67106901062029017391e-02, 5.46664613248889208474e-02, + 5.26354182767921896513e-02, 5.06177238609477817000e-02, + 4.86135532158685421122e-02, 4.66230949019303814174e-02, + 4.46465522512944634759e-02, 4.26841449164744590750e-02, + 4.07361106559409394401e-02, 3.88027074045261474722e-02, + 3.68842156885673053135e-02, 3.49809414617161251737e-02, + 3.30932194585785779961e-02, 3.12214171919203004046e-02, + 2.93659397581333588001e-02, 2.75272356696031131329e-02, + 2.57058040085489103443e-02, 2.39022033057958785407e-02, + 2.21170627073088502113e-02, 2.03510962300445102935e-02, + 1.86051212757246224594e-02, 1.68800831525431419000e-02, + 1.51770883079353092332e-02, 1.34974506017398673818e-02, + 1.18427578579078790488e-02, 1.02149714397014590439e-02, + 8.61658276939872638800e-03, 7.05087547137322242369e-03, + 5.52240329925099155545e-03, 4.03797259336302356153e-03, + 2.60907274610215926189e-03, 1.26028593049859797236e-03] + +def _load_wi(builder, idx, fptype, data): + module = builder.function.module + fmt_ty = ir.ArrayType(fptype, 256) + global_wi = ir.GlobalVariable(module, fmt_ty, + name="__pnl_builtin_wi_{}".format(fptype)) + global_wi.linkage = "internal" + global_wi.global_constant = True + global_wi.initializer = fmt_ty(data) + + ptr = builder.gep(global_wi, [idx.type(0), idx]) + return builder.load(ptr) + + +def _load_ki(builder, idx, itype, data): + module = builder.function.module + fmt_ty = ir.ArrayType(itype, 256) + global_ki = ir.GlobalVariable(module, fmt_ty, + name="__pnl_builtin_ki_{}".format(itype)) + global_ki.linkage = "internal" + global_ki.global_constant = True + global_ki.initializer = fmt_ty(data) + + ptr = builder.gep(global_ki, [idx.type(0), idx]) + return builder.load(ptr) + + +def _load_fi(builder, idx, fptype, data): + module = builder.function.module + fmt_ty = ir.ArrayType(fptype, 256) + name = "__pnl_builtin_fi_{}".format(fptype) + try: + global_fi = module.get_global(name) + assert global_fi.type.pointee == fmt_ty + except KeyError: + global_fi = ir.GlobalVariable(module, fmt_ty, + name=name) + global_fi.linkage = "internal" + global_fi.global_constant = True + global_fi.initializer = fmt_ty(data) + + ptr = builder.gep(global_fi, [idx.type(0), idx]) + return builder.load(ptr) + + +def _setup_philox_rand_normal(ctx, state_ty, gen_float, gen_int, wi_data, ki_data, fi_data): + fptype = gen_float.args[1].type.pointee + itype = gen_int.args[1].type.pointee + if fptype != ctx.float_ty: + # We don't have numeric halpers available for the desired type + return + builder = _setup_builtin_func_builder(ctx, "philox_rand_normal", + 
(state_ty.as_pointer(), fptype.as_pointer())) + state, out = builder.function.args + + loop_block = builder.append_basic_block("gen_loop_ziggurat") + + # Allocate storage for calling int/float PRNG + # outside of the loop + tmp_fptype = builder.alloca(fptype) + tmp_itype = builder.alloca(itype) + + # Enter the main generation loop + builder.branch(loop_block) + builder.position_at_end(loop_block) + + r_ptr = tmp_itype + builder.call(gen_int, [state, r_ptr]) + r = builder.load(r_ptr) + + # This is only for 64 bit + # Extract index to the global table + idx = builder.and_(r, r.type(0xff)) + r = builder.lshr(r, r.type(8)) + + # Extract sign + sign = builder.and_(r, r.type(0x1)) + r = builder.lshr(r, r.type(1)) + + # Extract abs + MANTISSA = 0x000fffffffffffff if fptype is ir.DoubleType() else 0x007fffff + rabs = builder.and_(r, r.type(MANTISSA)) + rabs_f = builder.uitofp(rabs, fptype) + + wi = _load_wi(builder, idx, fptype, wi_data) + x = builder.fmul(rabs_f, wi) + + # Apply sign + neg_x = helpers.fneg(builder, x) + x = builder.select(builder.trunc(sign, ctx.bool_ty), neg_x, x) + + ki = _load_ki(builder, idx, itype, ki_data) + is_lt_ki = builder.icmp_unsigned("<", rabs, ki) + with builder.if_then(is_lt_ki, likely=True): + builder.store(x, out) + builder.ret_void() + + is_idx0 = builder.icmp_unsigned("==", idx.type(0), idx) + with builder.if_then(is_idx0): + inner_loop_block = builder.block + + ZIGGURAT_NOR_R = 3.6541528853610087963519472518 + ZIGGURAT_NOR_INV_R = 0.27366123732975827203338247596 + + # xx = -ziggurat_nor_inv_r * npy_log1p(-next_double(bitgen_state)); + builder.call(gen_float, [state, tmp_fptype]) + xx = builder.load(tmp_fptype) + xx = helpers.fneg(builder, xx) + xx = helpers.log1p(ctx, builder, xx) + xx = builder.fmul(xx.type(-ZIGGURAT_NOR_INV_R), xx) + + # yy = -npy_log1p(-next_double(bitgen_state)); + builder.call(gen_float, [state, tmp_fptype]) + yy = builder.load(tmp_fptype) + yy = helpers.fneg(builder, yy) + yy = helpers.log1p(ctx, builder, yy) + yy = helpers.fneg(builder, yy) + + # if (yy + yy > xx * xx) + lhs = builder.fadd(yy, yy) + rhs = builder.fmul(xx, xx) + cond = builder.fcmp_ordered(">", lhs, rhs) + with builder.if_then(cond): + # return ((rabs >> 8) & 0x1) ? 
-(ziggurat_nor_r + xx) : ziggurat_nor_r + xx; + val = builder.fadd(xx.type(ZIGGURAT_NOR_R), xx) + neg_val = helpers.fneg(builder, val) + sign_cond = builder.lshr(rabs, rabs.type(8)) + sign_cond = builder.trunc(sign_cond, ctx.bool_ty) + val = builder.select(sign_cond, neg_val, val) + builder.store(val, out) + builder.ret_void() + + builder.branch(inner_loop_block) + + # idx has to be > 0 + fi_idx = _load_fi(builder, idx, fptype, fi_data) + fi_idxm1 = _load_fi(builder, builder.sub(idx, idx.type(1)), fptype, fi_data) + x_sq = builder.fmul(x, x) + x_sq_nh = builder.fmul(x_sq, x_sq.type(-0.5)) + + exp_x_sqnh = helpers.exp(ctx, builder, x_sq_nh) + + # next uniform random number + r_ptr = tmp_fptype + builder.call(gen_float, [state, r_ptr]) + r = builder.load(r_ptr) + + # if (((fi_double[idx - 1] - fi_double[idx]) * next_double(bitgen_state) + + # fi_double[idx]) < exp(-0.5 * x * x)) + lhs = builder.fsub(fi_idxm1, fi_idx) + lhs = builder.fmul(lhs, r) + lhs = builder.fadd(lhs, fi_idx) + + should_ret = builder.fcmp_ordered("<", lhs, exp_x_sqnh) + with builder.if_then(should_ret): + builder.store(x, out) + builder.ret_void() + + builder.branch(loop_block) + + +def get_philox_state_struct(ctx): + int64_ty = ir.IntType(64) + int16_ty = ir.IntType(16) + return ir.LiteralStructType([ + ir.ArrayType(int64_ty, 4), # counter + ir.ArrayType(int64_ty, 2), # key + ir.ArrayType(int64_ty, _PHILOX_DEFAULT_BUFFER_SIZE), # pre-gen buffer + ctx.int32_ty, # the other half of random 64 bit int + int16_ty, # buffer pos + ctx.bool_ty, # has uint buffered + int64_ty]) # seed + + +def setup_philox(ctx): + state_ty = get_philox_state_struct(ctx) + + _setup_philox_rand_init(ctx, state_ty) + + gen_int64 = _setup_philox_rand_int64(ctx, state_ty) + gen_double = _setup_philox_rand_double(ctx, state_ty, gen_int64) + _setup_philox_rand_normal(ctx, state_ty, gen_double, gen_int64, _wi_double_data, _ki_i64_data, _fi_double_data) + + gen_int32 = _setup_philox_rand_int32(ctx, state_ty, gen_int64) + gen_float = _setup_philox_rand_float(ctx, state_ty, gen_int32) + _setup_philox_rand_normal(ctx, state_ty, gen_float, gen_int32, _wi_float_data, _ki_i32_data, _fi_float_data) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 5f29bdfdc00..1ad7df72155 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -55,6 +55,7 @@ def np_cmp(builder, x, y): numpy_handlers = { 'tanh': self.call_builtin_np_tanh, 'exp': self.call_builtin_np_exp, + 'sqrt': self.call_builtin_np_sqrt, 'equal': get_np_cmp("=="), 'not_equal': get_np_cmp("!="), 'less': get_np_cmp("<"), @@ -470,6 +471,10 @@ def call_builtin_np_exp(self, builder, x): x = self.get_rval(x) return self._do_unary_op(builder, x, lambda builder, x: helpers.exp(self.ctx, builder, x)) + def call_builtin_np_sqrt(self, builder, x): + x = self.get_rval(x) + return self._do_unary_op(builder, x, lambda builder, x: helpers.sqrt(self.ctx, builder, x)) + def call_builtin_np_max(self, builder, x): # numpy max searches for the largest scalar and propagates NaNs be default. 
# Only the default behaviour is supported atm @@ -717,7 +722,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): with _gen_composition_exec_context(ctx, composition, tags=tags) as (builder, data, params, cond_gen): state, _, comp_in, _, cond = builder.function.args - nodes_states = helpers.get_param_ptr(builder, composition, state, "nodes") + nodes_states = helpers.get_state_ptr(builder, composition, state, "nodes") # Allocate temporary output storage output_storage = builder.alloca(data.type.pointee, name="output_storage") diff --git a/psyneulink/core/llvm/debug.py b/psyneulink/core/llvm/debug.py index e297a7863ab..ec9ccaa50f6 100644 --- a/psyneulink/core/llvm/debug.py +++ b/psyneulink/core/llvm/debug.py @@ -33,7 +33,8 @@ instead of loading them from the param argument * "const_state" -- hardcode base context values into generate code, instead of laoding them from the context argument - * "no_ref_pass" -- Don't pass arguments to llvm functions by reference + * "opt" -- Set compiler optimization level (0,1,2,3) + * "cuda_max_regs" -- Set maximum allowed GPU arch registers Compiled code dump: * "llvm" -- dumps LLVM IR into a file (named after the dumped module). diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 8c3ba58443f..ac722dd9d77 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -18,6 +18,7 @@ from inspect import isgenerator import os import sys +import time from psyneulink.core import llvm as pnlvm @@ -69,17 +70,28 @@ def __init__(self): self._debug_env = debug_env def _get_compilation_param(self, name, init_method, arg): - struct = getattr(self, name) + struct = getattr(self, name, None) if struct is None: struct_ty = self._bin_func.byref_arg_types[arg] init_f = getattr(self._obj, init_method) if len(self._execution_contexts) > 1: struct_ty = struct_ty * len(self._execution_contexts) + init_start = time.time() initializer = (init_f(ex) for ex in self._execution_contexts) else: + init_start = time.time() initializer = init_f(self._execution_contexts[0]) + init_end = time.time() struct = struct_ty(*initializer) + struct_end = time.time() + + + if "time_stat" in self._debug_env: + print("Time to get initializer for struct:", name, + "for", self._obj.name, ":", init_end - init_start) + print("Time to instantiate struct:", name, + "for", self._obj.name, ":", struct_end - init_end) setattr(self, name, struct) if "stat" in self._debug_env: print("Instantiated struct:", name, "( size:" , @@ -158,7 +170,7 @@ def _cuda_conditions(self): @property def _cuda_out(self): if self._buffer_cuda_out is None: - size = ctypes.sizeof(self._vo_ty) + size = ctypes.sizeof(self._ct_vo) self._buffer_cuda_out = jit_engine.pycuda.driver.mem_alloc(size) return self._buffer_cuda_out @@ -174,7 +186,7 @@ def cuda_execute(self, variable): threads=len(self._execution_contexts)) # Copy the result from the device - ct_res = self.download_ctype(self._cuda_out, self._vo_ty, 'result') + ct_res = self.download_ctype(self._cuda_out, type(self._ct_vo), 'result') return _convert_ctype_to_python(ct_res) @@ -188,10 +200,7 @@ def __init__(self, component, execution_ids=[None], *, tags=frozenset()): ] self._component = component - self._param = None - self._state = None - - par_struct_ty, ctx_struct_ty, vi_ty, vo_ty = self._bin_func.byref_arg_types + _, _, vi_ty, vo_ty = self._bin_func.byref_arg_types if len(execution_ids) > 1: self._bin_multirun = self._bin_func.get_multi_run() @@ -199,9 +208,7 @@ def __init__(self, component, 
execution_ids=[None], *, tags=frozenset()): vo_ty = vo_ty * len(execution_ids) vi_ty = vi_ty * len(execution_ids) - self._vo_ty = vo_ty self._ct_vo = vo_ty() - self._vi_ty = vi_ty self._vi_dty = _element_dtype(vi_ty) if "stat" in self._debug_env: print("Input struct size:", _pretty_size(ctypes.sizeof(vi_ty)), @@ -223,7 +230,7 @@ def _state_struct(self): def execute(self, variable): # Make sure function inputs are 2d. - # Mechanism inptus are already 3d so the first part is nop. + # Mechanism inputs are already 3d so the first part is nop. new_variable = np.asfarray(np.atleast_2d(variable), dtype=self._vi_dty) @@ -271,9 +278,6 @@ def __init__(self, composition, execution_ids=[None], *, additional_tags=frozens self.__tags = frozenset(additional_tags) self.__conds = None - self._state = None - self._param = None - self._data = None if len(execution_ids) > 1: self._ct_len = ctypes.c_int(len(execution_ids)) @@ -320,7 +324,7 @@ def _bin_func_multirun(self): def _set_bin_node(self, node): assert node in self._composition._all_nodes - wrapper = builder_context.LLVMBuilderContext.get_global().get_node_wrapper(self._composition, node) + wrapper = builder_context.LLVMBuilderContext.get_current().get_node_wrapper(self._composition, node) self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj( wrapper, tags=self.__tags.union({"node_wrapper"})) @@ -407,11 +411,14 @@ def _extract_node_struct(self, node, data): # followed by a list of projection parameters; get the first one # output structure consists of a list of node outputs, # followed by a list of nested data structures; get the first one - field = data._fields_[0][0] - res_struct = getattr(data, field) + field_name = data._fields_[0][0] + res_struct = getattr(data, field_name) + + # Get the index into the array of all nodes index = self._composition._get_node_index(node) - field = res_struct._fields_[index][0] - res_struct = getattr(res_struct, field) + field_name = res_struct._fields_[index][0] + res_struct = getattr(res_struct, field_name) + return _convert_ctype_to_python(res_struct) def extract_node_struct(self, node, struct): @@ -656,20 +663,20 @@ def cuda_run(self, inputs, runs, num_input_sets): def _prepare_evaluate(self, variable, num_evaluations): ocm = self._composition.controller assert len(self._execution_contexts) == 1 - context = self._execution_contexts[0] bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset({"evaluate", "alloc_range"})) self.__bin_func = bin_func - assert len(bin_func.byref_arg_types) == 7 # There are 7 arguments to evaluate_alloc_range: # comp_param, comp_state, from, to, results, input, comp_data # all but #4 are shared + assert len(bin_func.byref_arg_types) == 7 # Directly initialized structures - ct_comp_param = bin_func.byref_arg_types[0](*ocm.agent_rep._get_param_initializer(context)) - ct_comp_state = bin_func.byref_arg_types[1](*ocm.agent_rep._get_state_initializer(context)) - ct_comp_data = bin_func.byref_arg_types[6](*ocm.agent_rep._get_data_initializer(context)) + assert ocm.agent_rep is self._composition + ct_comp_param = self._get_compilation_param('_eval_param', '_get_param_initializer', 0) + ct_comp_state = self._get_compilation_param('_eval_state', '_get_state_initializer', 1) + ct_comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6) # Construct input variable var_dty = _element_dtype(bin_func.byref_arg_types[5]) @@ -678,6 +685,7 @@ def _prepare_evaluate(self, variable, num_evaluations): # Output ctype out_ty = bin_func.byref_arg_types[4] * num_evaluations + # 
return variable as numpy array. pycuda can use it directly return ct_comp_param, ct_comp_state, ct_comp_data, converted_variable, out_ty def cuda_evaluate(self, variable, num_evaluations): @@ -685,8 +693,7 @@ def cuda_evaluate(self, variable, num_evaluations): self._prepare_evaluate(variable, num_evaluations) self._uploaded_bytes['input'] += converted_variable.nbytes - # Ouput is allocated on device, but we need the ctype. - + # Output is allocated on device, but we need the ctype (out_ty). cuda_args = (self.upload_ctype(ct_comp_param, 'params'), self.upload_ctype(ct_comp_state, 'state'), jit_engine.pycuda.driver.mem_alloc(ctypes.sizeof(out_ty)), diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index c5fe4f06c3a..dce10f5ba15 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -81,12 +81,18 @@ def uint_min(builder, val, other): def get_param_ptr(builder, component, params_ptr, param_name): + # check if the passed location matches expected size + assert len(params_ptr.type.pointee) == len(component.llvm_param_ids) + idx = ir.IntType(32)(component.llvm_param_ids.index(param_name)) return builder.gep(params_ptr, [ir.IntType(32)(0), idx], name="ptr_param_{}_{}".format(param_name, component.name)) def get_state_ptr(builder, component, state_ptr, stateful_name, hist_idx=0): + # check if the passed location matches expected size + assert len(state_ptr.type.pointee) == len(component.llvm_state_ids) + idx = ir.IntType(32)(component.llvm_state_ids.index(stateful_name)) ptr = builder.gep(state_ptr, [ir.IntType(32)(0), idx], name="ptr_state_{}_{}".format(stateful_name, @@ -128,6 +134,19 @@ def load_extract_scalar_array_one(builder, ptr): return val +def umul_lo_hi(builder, a, b): + assert a.type.width == b.type.width + + a_val = builder.zext(a, ir.IntType(a.type.width * 2)) + b_val = builder.zext(b, ir.IntType(b.type.width * 2)) + res = builder.mul(a_val, b_val) + + lo = builder.trunc(res, a.type) + hi = builder.lshr(res, res.type(a.type.width)) + hi = builder.trunc(hi, a.type) + return lo, hi + + def fneg(builder, val, name=""): return builder.fsub(val.type(-0.0), val, name) @@ -136,6 +155,18 @@ def exp(ctx, builder, x): exp_f = ctx.get_builtin("exp", [x.type]) return builder.call(exp_f, [x]) +def log(ctx, builder, x): + log_f = ctx.get_builtin("log", [x.type]) + return builder.call(log_f, [x]) + +def log1p(ctx, builder, x): + log_f = ctx.get_builtin("log", [x.type]) + x1p = builder.fadd(x, x.type(1)) + return builder.call(log_f, [x1p]) + +def sqrt(ctx, builder, x): + sqrt_f = ctx.get_builtin("sqrt", [x.type]) + return builder.call(sqrt_f, [x]) def tanh(ctx, builder, x): tanh_f = ctx.get_builtin("tanh", [x.type]) diff --git a/psyneulink/core/llvm/jit_engine.py b/psyneulink/core/llvm/jit_engine.py index 24d114beb8f..faf7be4918a 100644 --- a/psyneulink/core/llvm/jit_engine.py +++ b/psyneulink/core/llvm/jit_engine.py @@ -67,11 +67,13 @@ def _binding_initialize(): def _cpu_jit_constructor(): _binding_initialize() + opt_level = int(debug_env.get('opt', 2)) + # PassManagerBuilder can be shared __pass_manager_builder = binding.PassManagerBuilder() - __pass_manager_builder.loop_vectorize = True - __pass_manager_builder.slp_vectorize = True - __pass_manager_builder.opt_level = 2 + __pass_manager_builder.loop_vectorize = opt_level != 0 + __pass_manager_builder.slp_vectorize = opt_level != 0 + __pass_manager_builder.opt_level = opt_level __cpu_features = binding.get_host_cpu_features().flatten() __cpu_name = binding.get_host_cpu_name() @@ -80,14 
+82,14 @@ def _cpu_jit_constructor(): __cpu_target = binding.Target.from_default_triple() # FIXME: reloc='static' is needed to avoid crashes on win64 # see: https://github.com/numba/llvmlite/issues/457 - __cpu_target_machine = __cpu_target.create_target_machine(cpu=__cpu_name, features=__cpu_features, opt=2, reloc='static') + __cpu_target_machine = __cpu_target.create_target_machine(cpu=__cpu_name, features=__cpu_features, opt=opt_level, reloc='static') __cpu_pass_manager = binding.ModulePassManager() __cpu_target_machine.add_analysis_passes(__cpu_pass_manager) __pass_manager_builder.populate(__cpu_pass_manager) # And an execution engine with a builtins backing module - builtins_module = _generate_cpu_builtins_module(LLVMBuilderContext.float_ty) + builtins_module = _generate_cpu_builtins_module(LLVMBuilderContext.get_current().float_ty) if "llvm" in debug_env: with open(builtins_module.name + '.parse.ll', 'w') as dump_file: dump_file.write(str(builtins_module)) @@ -101,10 +103,12 @@ def _cpu_jit_constructor(): def _ptx_jit_constructor(): _binding_initialize() + opt_level = int(debug_env.get('opt', 0)) + # PassManagerBuilder can be shared __pass_manager_builder = binding.PassManagerBuilder() - __pass_manager_builder.opt_level = 1 # Basic optimizations - __pass_manager_builder.size_level = 1 # asic size optimizations + __pass_manager_builder.opt_level = opt_level + __pass_manager_builder.size_level = 1 # Try to reduce size to reduce PTX parsing time # Use default device # TODO: Add support for multiple devices @@ -286,18 +290,19 @@ def __init__(self, tm): self._target_machine = tm # -dc option tells the compiler that the code will be used for linking - self._generated_builtins = pycuda.compiler.compile(_ptx_builtin_source.format(type=str(LLVMBuilderContext.float_ty)), target='cubin', options=['-dc']) + self._generated_builtins = pycuda.compiler.compile(_ptx_builtin_source.format(type=str(LLVMBuilderContext.get_current().float_ty)), target='cubin', options=['-dc']) def set_object_cache(cache): pass def add_module(self, module): + max_regs = int(debug_env.get("cuda_max_regs", 256)) try: # LLVM can't produce CUBIN for some reason start_time = time.perf_counter() ptx = self._target_machine.emit_assembly(module) ptx_time = time.perf_counter() - mod = pycuda.compiler.DynamicModule() + mod = pycuda.compiler.DynamicModule(link_options=[(pycuda.driver.jit_option.MAX_REGISTERS, max_regs)]) mod.add_data(self._generated_builtins, pycuda.driver.jit_input_type.CUBIN, "builtins.cubin") mod.add_data(ptx.encode(), pycuda.driver.jit_input_type.PTX, module.name + ".ptx") module_time = time.perf_counter() diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py index 1eabcd46bce..f97e9a80003 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/agtcontrolmechanism.py @@ -165,11 +165,10 @@ from psyneulink.core.components.functions.stateful.integratorfunctions import DualAdaptiveIntegrator from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.processing.objectivemechanism import MONITORED_OUTPUT_PORTS, ObjectiveMechanism -from psyneulink.core.components.shellclasses import Mechanism, System_Base +from psyneulink.core.components.shellclasses import Mechanism from 
psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import \ - CONTROL, CONTROL_PROJECTIONS, CONTROL_SIGNALS, INIT_EXECUTE_METHOD_ONLY, \ - MECHANISM, MULTIPLICATIVE, OBJECTIVE_MECHANISM + INIT_EXECUTE_METHOD_ONLY, MECHANISM, OBJECTIVE_MECHANISM from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py index edc992a2a3b..e68c13b57d1 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py @@ -305,10 +305,8 @@ from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.components.ports.outputport import OutputPort -from psyneulink.core.globals.context import Context, ContextFlags from psyneulink.core.globals.keywords import \ - ALL, CONTROL, CONTROL_PROJECTIONS, FUNCTION, INIT_EXECUTE_METHOD_ONLY, \ - MULTIPLICATIVE, MULTIPLICATIVE_PARAM, PROJECTIONS + INIT_EXECUTE_METHOD_ONLY, MULTIPLICATIVE_PARAM, PROJECTIONS from psyneulink.core.globals.parameters import Parameter, ParameterAlias from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index 1f09f20b40c..6702cccb373 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -718,7 +718,7 @@ class Parameters(ProcessingMechanism.Parameters): input_format = Parameter(SCALAR, stateful=False, loggable=False) initializer = np.array([[0]]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) output_ports = Parameter( [DECISION_VARIABLE, RESPONSE_TIME], @@ -1074,7 +1074,7 @@ def _execute( # Convert ER to decision variable: threshold = float(self.function._get_current_parameter_value(THRESHOLD, context)) random_state = self._get_current_parameter_value(self.parameters.random_state, context) - if random_state.rand() < return_value[self.PROBABILITY_LOWER_THRESHOLD_INDEX]: + if random_state.uniform() < return_value[self.PROBABILITY_LOWER_THRESHOLD_INDEX]: return_value[self.DECISION_VARIABLE_INDEX] = np.atleast_1d(-1 * threshold) else: return_value[self.DECISION_VARIABLE_INDEX] = threshold @@ -1127,9 +1127,10 @@ def _gen_llvm_invoke_function(self, ctx, builder, function, params, state, varia threshold = pnlvm.helpers.load_extract_scalar_array_one(builder, threshold_ptr) # Load mechanism state to generate random numbers - state = builder.function.args[1] - random_state = pnlvm.helpers.get_state_ptr(builder, self, state, "random_state") - random_f = ctx.import_llvm_function("__pnl_builtin_mt_rand_double") + mech_params = builder.function.args[0] + mech_state = builder.function.args[1] + random_state = 
ctx.get_random_state_ptr(builder, self, mech_state, mech_params) + random_f = ctx.get_uniform_dist_function_by_state(random_state) random_val_ptr = builder.alloca(random_f.args[1].type.pointee) builder.call(random_f, [random_state, random_val_ptr]) random_val = builder.load(random_val_ptr) diff --git a/psyneulink/library/components/mechanisms/processing/leabramechanism.py b/psyneulink/library/components/mechanisms/processing/leabramechanism.py index 14f1407ecb5..16cbc400030 100644 --- a/psyneulink/library/components/mechanisms/processing/leabramechanism.py +++ b/psyneulink/library/components/mechanisms/processing/leabramechanism.py @@ -104,9 +104,8 @@ import numpy as np from psyneulink.core.components.functions.function import Function_Base -from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base -from psyneulink.core.globals.keywords import FUNCTION, INPUT_PORTS, LEABRA_FUNCTION, LEABRA_FUNCTION_TYPE, LEABRA_MECHANISM, NETWORK, OUTPUT_PORTS, PREFERENCE_SET_NAME +from psyneulink.core.globals.keywords import LEABRA_FUNCTION, LEABRA_FUNCTION_TYPE, LEABRA_MECHANISM, NETWORK, PREFERENCE_SET_NAME from psyneulink.core.globals.parameters import FunctionParameter, Parameter from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index d4a132dcc4b..69a48ef6fc5 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -393,6 +393,9 @@ def __str__(self): def _CHM_output_activity_getter(owning_component=None, context=None): current_activity = owning_component.parameters.current_activity._get(context) + if current_activity is None: + return None + if owning_component.target_size: return current_activity[owning_component.target_start:owning_component.target_end] else: @@ -400,18 +403,27 @@ def _CHM_output_activity_getter(owning_component=None, context=None): def _CHM_input_activity_getter(owning_component=None, context=None): current_activity = owning_component.parameters.current_activity._get(context) + if current_activity is None: + return None + return current_activity[:owning_component.input_size] def _CHM_hidden_activity_getter(owning_component=None, context=None): if owning_component.hidden_size: current_activity = owning_component.parameters.current_activity._get(context) + if current_activity is None: + return None + return current_activity[owning_component.input_size:owning_component.target_start] def _CHM_target_activity_getter(owning_component=None, context=None): if owning_component.target_size: current_activity = owning_component.parameters.current_activity._get(context) + if current_activity is None: + return None + return current_activity[owning_component.target_start:owning_component.target_end] @@ -937,10 +949,10 @@ class Parameters(RecurrentTransferMechanism.Parameters): ) max_passes = Parameter(1000, stateful=False) - output_activity = Parameter(None, read_only=True, getter=_CHM_output_activity_getter) - input_activity = Parameter(None, read_only=True, getter=_CHM_input_activity_getter) - 
hidden_activity = Parameter(None, read_only=True, getter=_CHM_hidden_activity_getter) - target_activity = Parameter(None, read_only=True, getter=_CHM_target_activity_getter) + output_activity = Parameter(None, read_only=True, getter=_CHM_output_activity_getter, dependencies='current_activity') + input_activity = Parameter(None, read_only=True, getter=_CHM_input_activity_getter, dependencies='current_activity') + hidden_activity = Parameter(None, read_only=True, getter=_CHM_hidden_activity_getter, dependencies='current_activity') + target_activity = Parameter(None, read_only=True, getter=_CHM_target_activity_getter, dependencies='current_activity') execution_phase = Parameter(None, read_only=True) # is_finished_ = Parameter(False, read_only=True) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index 23065b9ebb3..6b2de869bdd 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -453,6 +453,7 @@ def __init__(self, time_step_size=None, clip=None, output_ports:tc.optional(tc.any(str, Iterable))=None, + integrator_function=None, params=None, name=None, prefs:is_pref_set=None, @@ -514,7 +515,7 @@ def __init__(self, auto=self_excitation, hetero=hetero, function=function, - integrator_function=None, + integrator_function=integrator_function, initial_value=initial_value, noise=noise, clip=clip, diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index 9fa1f4ed9f0..c521d123319 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -72,11 +72,7 @@ from psyneulink.core.components.functions.function import get_matrix from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.components.projections.projection import projection_keywords -from psyneulink.core.components.shellclasses import Mechanism -from psyneulink.core.components.ports.outputport import OutputPort -from psyneulink.core.globals.context import ContextFlags -from psyneulink.core.globals.keywords import DEFAULT_MATRIX, FUNCTION_PARAMS, MASKED_MAPPING_PROJECTION, MATRIX -from psyneulink.core.globals.parameters import Parameter +from psyneulink.core.globals.keywords import MASKED_MAPPING_PROJECTION, MATRIX from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index c1c8d79df99..81142b6f4f2 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -148,7 +148,7 @@ import ReportOutput, ReportParams, ReportProgress, ReportSimulations, ReportDevices, \ LEARN_REPORT, EXECUTE_REPORT, PROGRESS_REPORT from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context -from psyneulink.core.globals.keywords import SOFT_CLAMP +from psyneulink.core.globals.keywords import AUTODIFF_COMPOSITION, SOFT_CLAMP from psyneulink.core.scheduling.scheduler import Scheduler from psyneulink.core.globals.parameters import 
Parameter from psyneulink.core.scheduling.time import TimeScale @@ -210,6 +210,7 @@ class AutodiffComposition(Composition): """ + componentCategory = AUTODIFF_COMPOSITION class Parameters(Composition.Parameters): """""" optimizer = None @@ -557,6 +558,9 @@ def execute(self, report_num=report_num ) + def _get_state_ids(self): + return super()._get_state_ids() + ["optimizer"] + def _get_state_struct_type(self, ctx): comp_state_type_list = ctx.get_state_struct_type(super()) pytorch_representation = self._build_pytorch_representation() diff --git a/psyneulink/library/compositions/compositionrunner.py b/psyneulink/library/compositions/compositionrunner.py index 4eb0fbc4049..d7039a1902e 100644 --- a/psyneulink/library/compositions/compositionrunner.py +++ b/psyneulink/library/compositions/compositionrunner.py @@ -177,7 +177,7 @@ def run_learning(self, for stim_input, stim_target, stim_epoch in zip(inputs, targets, epochs): if not callable(stim_input) and 'epochs' in stim_input: - stim_epoch = stim_input['epochs'] + stim_epoch = stim_input['epochs'] stim_input, num_input_trials = self._composition._parse_learning_spec(stim_input, stim_target) diff --git a/psyneulink/library/compositions/gymforagercfa.py b/psyneulink/library/compositions/gymforagercfa.py index 60a8741e7d5..64250e035f6 100644 --- a/psyneulink/library/compositions/gymforagercfa.py +++ b/psyneulink/library/compositions/gymforagercfa.py @@ -36,11 +36,11 @@ Parameterizes weights of a `update_weights ` used by its `evaluate ` method to predict the `net_outcome ` - for a `Composition` (or part of one) controlled by an `OptimiziationControlMechanism`, from a set of `state_feature_values + for a `Composition` (or part of one) controlled by an `OptimizationControlMechanism`, from a set of `state_feature_values ` and a `control_allocation ` - provided by the OptimiziationControlMechanism. + provided by the OptimizationControlMechanism. - The `state_feature_values ` and `control_allocation + The `state_feature_values ` and `control_allocation ` passed to the RegressorCFA's `adapt ` method, and provided as the input to its `update_weights `, are represented in the `vector ` attribute of a `PredictionVector` assigned to the RegressorCFA`s @@ -99,7 +99,7 @@ class GymForagerCFA(RegressionCFA): prediction_terms=None) Subclass of `RegressionCFA` that implements a CompositionFunctionApproximator as the - `agent_rep ` of an `OptimizationControlmechanism`. + `agent_rep ` of an `OptimizationControlMechanism`. See `RegressionCFA ` for arguments and attributes. @@ -166,13 +166,13 @@ def adapt(self, feature_values, control_allocation, net_outcome, context=None): # FIX: RENAME AS _EXECUTE_AS_REP ONCE SAME IS DONE FOR COMPOSITION # def evaluate(self, control_allocation, num_samples, reset_stateful_functions_to, state_feature_values, context): - def evaluate(self, feature_values, control_allocation, num_estimates, context): + def evaluate(self, feature_values, control_allocation, num_estimates, num_trials_per_estimate, context): """Update prediction_vector `, then multiply by regression_weights. Uses the current values of `regression_weights ` together with values of **control_allocation** and **state_feature_values** arguments to generate predicted `net_outcome - `. + `. .. 
note:: If this method is assigned as the `objective_funtion of a `GradientOptimization` `Function`, diff --git a/psyneulink/library/compositions/regressioncfa.py b/psyneulink/library/compositions/regressioncfa.py index cdff7135d99..7682d9ecbba 100644 --- a/psyneulink/library/compositions/regressioncfa.py +++ b/psyneulink/library/compositions/regressioncfa.py @@ -25,34 +25,37 @@ A `RegressionCFA` is a subclass of `CompositionFunctionApproximator` that parameterizes a set of `regression_weights ` over trials to predict the `net_outcome ` for a `Composition` (or part of one) controlled by an `OptimizationControlMechanism`. The `regression_weights -` are updated by its `update_weights ` `LearningFunction` -assigned as its `adapt ` method, which is called by the `evaluate -` method to predict the `net_outcome ` for a -`Composition` (or part of one) controlled by an `OptimiziationControlMechanism`, based on a set of `state_feature_values -`, a `control_allocation `, -and the `net_outcome ` they produced, passed to it from an `OptimizationControlMechanism`. +` are updated by its `update_weights ` +`LearningFunction` assigned as its `adapt ` method, which is called by the +`evaluate ` method to predict the `net_outcome ` +for a `Composition` (or part of one) controlled by an `OptimizationControlMechanism`, based on a set of +`state_feature_values `, a `control_allocation +`, and the `net_outcome ` they produced, +passed to it from an `OptimizationControlMechanism`. COMMENT: Its `evaluate ` method calls its `update_weights ` to generate and return a predicted `net_outcome -` for a given set of `state_feature_values ` -and a `control_allocation ` provided by an `OptimizationControlMechanism`. +` for a given set of `state_feature_values +` and a `control_allocation ` +provided by an `OptimizationControlMechanism`. COMMENT -The `state_feature_values ` and `control_allocation -` passed to the RegressorCFA's `adapt ` method, and provided -as the input to its `update_weights `, are represented in the `vector -` attribute of a `PredictionVector` assigned to the RegressorCFA`s `prediction_vector -` attribute. The `state_feature_values ` are -assigned to the state_features field of the `prediction_vector `, and the `control_allocation +The `state_feature_values ` and `control_allocation +` passed to the RegressionCFA's `adapt ` method, and provided +as the input to its `update_weights `, are represented in the `vector +` attribute of a `PredictionVector` assigned to the RegressionCFA`s `prediction_vector +` attribute. The `state_feature_values +` are assigned to the state_features field of the +`prediction_vector `, and the `control_allocation ` is assigned to the control_allocation field of the `prediction_vector -`. The `prediction_vector ` may also contain fields +`. The `prediction_vector ` may also contain fields for the `costs ControlMechanism.costs` associated with the `control_allocation ` and for interactions among those terms. -The `regression_weights ` returned by the `update_weights -` are used by the RegressorCFA's `evaluate ` method to predict -the `net_outcome ` from the `prediction_vector `. +The `regression_weights ` returned by the `update_weights +` are used by the RegressionCFA's `evaluate ` method to predict +the `net_outcome ` from the `prediction_vector `. 
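[Editor's note, illustrative only and not part of the patch: the docstring paragraph above describes evaluate() as updating the prediction_vector and then multiplying it by the regression_weights, which the code below implements as predicted_outcome += np.dot(v, w). A minimal standalone sketch of that prediction step; the names v and w mirror the locals used in evaluate(), and the numeric values are made up:

    import numpy as np

    # hypothetical flattened values of the prediction_vector terms (e.g. PV.F, PV.C, PV.FC, PV.COST)
    v = np.array([0.2, 0.5, 0.1, -0.3])
    # hypothetical regression_weights returned by update_weights (e.g. a BayesGLM fit)
    w = np.array([1.0, 0.8, 0.4, 2.0])
    predicted_net_outcome = np.dot(v, w)   # 0.2 + 0.4 + 0.04 - 0.6 = 0.04
]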
COMMENT: @@ -157,7 +160,7 @@ class RegressionCFA(CompositionFunctionApproximator): prediction_terms=[PV.F, PV.C, PV.COST]) Subclass of `CompositionFunctionApproximator` that implements a CompositionFunctionApproximator as the - `agent_rep ` of an `OptimizationControlmechanism`. + `agent_rep ` of an `OptimizationControlMechanism`. See `CompositionFunctionApproximator ` for additional arguments and attributes. @@ -168,16 +171,16 @@ class RegressionCFA(CompositionFunctionApproximator): update_weights : LearningFunction, function or method : default BayesGLM parameterizes the `regression_weights ` used by the `evaluate ` method to improve its prediction of `net_outcome ` - from a given set of `state_feature_values ` and a - `control_allocation ` provided by an `OptimiziationControlMechanism`. + from a given set of `state_feature_values ` and a + `control_allocation ` provided by an `OptimizationControlMechanism`. It must take a 2d array as its first argument, the first item of which is an array the same length of the `vector ` attribute of its `prediction_vector - `, and the second item a 1d array containing a scalar + `, and the second item a 1d array containing a scalar value that it tries predict. prediction_terms : List[PV] : default [PV.F, PV.C, PV.COST] terms to be included in (and thereby determines the length of) the `vector - ` attribute of the `prediction_vector `; + ` attribute of the `prediction_vector `; items are members of the `PV` enum; the default is [`F `, `C ` `FC `, `COST `]. If `None` is specified, the default values will automatically be assigned. @@ -187,25 +190,25 @@ class RegressionCFA(CompositionFunctionApproximator): update_weights : LearningFunction, function or method parameterizes the `regression_weights ` used by the `evaluate ` method to improve prediction of `net_outcome ` - from a given set of `state_feature_values ` and a - `control_allocation ` provided by an `OptimiziationControlMechanism`; - its result is assigned as the value of the `regression_weights ` attribute. + from a given set of `state_feature_values ` and a + `control_allocation ` provided by an `OptimizationControlMechanism`; + its result is assigned as the value of the `regression_weights ` attribute. prediction_terms : List[PV] terms included in `vector ` attribute of the - `prediction_vector `; items are members of the `PV` enum; the + `prediction_vector `; items are members of the `PV` enum; the default is [`F `, `C ` `FC `, `COST `]. prediction_vector : PredictionVector represents and manages values in its `vector ` attribute that are used by - `evaluate `, along with `regression_weights ` to + `evaluate `, along with `regression_weights ` to make its prediction. The values contained in the `vector ` attribute are - determined by `prediction_terms `. + determined by `prediction_terms `. regression_weights : 1d array - result returned by `update_weights , and used by - `evaluate ` method together with `prediction_vector ` - to generate predicted `net_outcome `. + result returned by `update_weights , and used by + `evaluate ` method together with `prediction_vector ` + to generate predicted `net_outcome `. """ @@ -282,9 +285,9 @@ def _instantiate_prediction_terms(self, prediction_terms): # def initialize(self, owner): def initialize(self, features_array, control_signals, context): - """Assign owner and instantiate `prediction_vector ` + """Assign owner and instantiate `prediction_vector ` - Must be called before RegressorCFA's methods can be used. 
+ Must be called before RegressionCFA's methods can be used. """ prediction_terms = self.prediction_terms @@ -300,7 +303,7 @@ def initialize(self, features_array, control_signals, context): self.update_weights.reset({DEFAULT_VARIABLE: update_weights_default_variable}) def adapt(self, feature_values, control_allocation, net_outcome, context=None): - """Update `regression_weights ` so as to improve prediction of + """Update `regression_weights ` so as to improve prediction of **net_outcome** from **state_feature_values** and **control_allocation**. """ prediction_vector = self.parameters.prediction_vector._get(context) @@ -328,14 +331,15 @@ def adapt(self, feature_values, control_allocation, net_outcome, context=None): ) # FIX: RENAME AS _EXECUTE_AS_REP ONCE SAME IS DONE FOR COMPOSITION + # FIX: 11/3/21 - IMPLEMENT USE OF num_trials_per_estimate? # def evaluate(self, control_allocation, num_samples, reset_stateful_functions_to, state_feature_values, context): - def evaluate(self, feature_values, control_allocation, num_estimates, context): - """Update prediction_vector `, + def evaluate(self, feature_values, control_allocation, num_estimates, num_trials_per_estimate, context): + """Update prediction_vector `, then multiply by regression_weights. - Uses the current values of `regression_weights ` together with - values of **control_allocation** and **state_feature_values** arguments to generate predicted `net_outcome - `. + Uses the current values of `regression_weights ` together with + values of **state_feature_values** and **control_allocation** arguments to generate predicted `net_outcome + `. .. note:: If this method is assigned as the `objective_funtion of a `GradientOptimization` `Function`, @@ -345,6 +349,7 @@ def evaluate(self, feature_values, control_allocation, num_estimates, context): predicted_outcome=0 prediction_vector = self.parameters.prediction_vector._get(context) + num_trials_per_estimate = num_trials_per_estimate or 1 num_estimates = num_estimates or 1 for i in range(num_estimates): @@ -354,7 +359,7 @@ def evaluate(self, feature_values, control_allocation, num_estimates, context): term_values_dict = prediction_vector.compute_terms(control_allocation, context=context) # FIX: THIS SHOULD GET A SAMPLE RATHER THAN JUST USE THE ONE RETURNED FROM ADAPT METHOD # OR SHOULD MULTIPLE SAMPLES BE DRAWN AND AVERAGED AT END OF ADAPT METHOD? - # I.E., AVERAGE WEIGHTS AND THEN OPTIMIZE OR OTPIMZE FOR EACH SAMPLE OF WEIGHTS AND THEN AVERAGE? + # I.E., AVERAGE WEIGHTS AND THEN OPTIMIZE OR OPTIMIZE FOR EACH SAMPLE OF WEIGHTS AND THEN AVERAGE? weights = self.parameters.regression_weights._get(context) v = np.array([]) @@ -366,7 +371,7 @@ def evaluate(self, feature_values, control_allocation, num_estimates, context): item_idx = prediction_vector.idx[pv_enum_val] v = np.append(v, term_value.reshape(-1)) w = np.append(w, weights[item_idx]) - # Get predicted outcome for this esimtate and add to sum over estimates + # Get predicted outcome for this estimate and add to sum over estimates predicted_outcome += np.dot(v,w) # Compute average over estimates @@ -386,7 +391,7 @@ class PredictionVector(): Arguments --------- - feature_values : 2d nparray + feature_values : 2d array arrays of state_features to assign as the `PV.F` term of `terms `. 
control_signals : List[ControlSignal] @@ -585,7 +590,7 @@ def __call__(self, terms:tc.any(PV, list))->tc.any(PV, tuple): def update_vector(self, variable, feature_values=None, context=None): """Update vector with flattened versions of values returned from the `compute_terms ` method of the `prediction_vector - `. + `. Updates `vector ` with current values of variable and, optionally, and state_feature_values. diff --git a/psyneulink/library/models/Cohen_Huston1994_horse_race.py b/psyneulink/library/models/Cohen_Huston1994_horse_race.py index 4c318f20350..54774e9cfdd 100644 --- a/psyneulink/library/models/Cohen_Huston1994_horse_race.py +++ b/psyneulink/library/models/Cohen_Huston1994_horse_race.py @@ -206,6 +206,7 @@ def pass_threshold2(response_layer, thresh, terminate, context): return True return False + # Create different terminate trial conditions -------------------------------------------------------------------------- terminate_trial = { pnl.TimeScale.TRIAL: pnl.While(pass_threshold, response_layer, threshold) @@ -241,6 +242,7 @@ def trial_dict(red_color, green_color, neutral_color, red_word, green_word, neut } return trialdict + # Define initialization trials separately # WR_initialize_input = trial_dict(0, 0, 0, 0, 0, 0, 0, 1) CN_initialize_input = trial_dict(0, 0, 0, 0, 0, 0, 1, 0) diff --git a/requirements.txt b/requirements.txt index 2a3be343b5e..50b561f313d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,18 +1,18 @@ autograd<=1.3 -graph-scheduler>=0.2.0, <=1.0.0rc2 +graph-scheduler>=0.2.0, <1.0.1 dill<=0.32 -elfi<0.8.1 -graphviz<0.18.0 -grpcio<1.35.0 -grpcio-tools<1.35.0 +elfi<0.8.3 +graphviz<0.20.0 +grpcio<1.43.0 +grpcio-tools<1.43.0 llvmlite<0.38 matplotlib<3.4.4 networkx<2.6 -numpy<1.21.3 -pillow<8.4.0 +numpy<1.21.4, >=1.17.0 +pillow<8.5.0 pint<0.18 -toposort<1.7 +toposort<1.8 torch>=1.8.0, <2.0.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' typecheck-decorator<=1.2 leabra-psyneulink<=0.3.2 -rich>=10.1, <10.10 +rich>=10.1, <10.13 diff --git a/setup.cfg b/setup.cfg index e0f95d884ac..915da5ce962 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,7 +69,7 @@ filterwarnings = [pycodestyle] # for code explanation see https://pep8.readthedocs.io/en/latest/intro.html#error-codes -ignore = E114,E115,E116,E117,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E203,E221,E225,E231,E241,E251,E252,E261,E262,E265,E301,E302,E303,E305,E306,E501,E712,E721,E722,E731,E741,W503,W504,W605 +ignore = E114,E115,E116,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E203,E221,E225,E231,E241,E251,E252,E261,E262,E265,E301,E302,E303,E306,E501,E721,E722,E731,E741,W503,W504,W605 exclude = .git/*,Scripts/*,__pytest__/*,docs/*,bin/* [pydocstyle] @@ -79,7 +79,7 @@ match-dir = (?!Script)(?!bin)(?!docs).* [coverage:run] branch = True -concurrency = multiprocess +concurrency = multiprocessing source = psyneulink/ [coverage:report] diff --git a/tests/components/test_general.py b/tests/components/test_general.py index 9436fc28158..762bf894a07 100644 --- a/tests/components/test_general.py +++ b/tests/components/test_general.py @@ -1,6 +1,7 @@ import inspect import psyneulink as pnl import pytest +import re from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base @@ -81,3 +82,104 @@ def test_parameters_user_specified(class_): + f' 
{class_.__name__}: {violators}' ) assert violators == set(), message + + +@pytest.fixture(scope='module') +def nested_compositions(): + comp = pnl.Composition(name='comp') + inner_comp = pnl.Composition(name='Inner Composition') + A = pnl.TransferMechanism( + function=pnl.Linear(slope=5.0, intercept=2.0), + name='A' + ) + B = pnl.TransferMechanism(function=pnl.Logistic, name='B') + C = pnl.RecurrentTransferMechanism(name='C') + D = pnl.IntegratorMechanism( + function=pnl.SimpleIntegrator(noise=pnl.NormalDist()), + name='D' + ) + E = pnl.TransferMechanism(name='E') + F = pnl.TransferMechanism(name='F') + + for m in [E, F]: + inner_comp.add_node(m) + + for m in [A, B, C, D, inner_comp]: + comp.add_node(m) + + comp.add_projection(pnl.MappingProjection(), A, B) + comp.add_projection(pnl.MappingProjection(), A, C) + comp.add_projection(pnl.MappingProjection(), B, D) + comp.add_projection(pnl.MappingProjection(), C, D) + comp.add_projection(pnl.MappingProjection(), C, inner_comp) + + inner_comp.add_projection(pnl.MappingProjection(), E, F) + + yield comp, inner_comp + + +@pytest.mark.parametrize( + 'filter_name, filter_regex, unknown_param_names', + [ + (None, None, []), + (None, 'slo$', ['slope']), + ('slo', None, ['slope']), + ('slo', 'slo$', ['slope']), + (['slope', 'seed'], None, []), + (None, ['slope', 'seed'], []), + (None, ['.*_param'], ['slope']), + ] +) +def test_all_dependent_parameters( + nested_compositions, + filter_name, + filter_regex, + unknown_param_names +): + comp, inner_comp = nested_compositions + + params_comp = comp.all_dependent_parameters(filter_name, filter_regex) + params_inner_comp = inner_comp.all_dependent_parameters( + filter_name, filter_regex + ) + + params_comp_keys = set(params_comp.keys()) + params_inner_comp_keys = set(params_inner_comp.keys()) + + assert params_inner_comp_keys.issubset(params_comp_keys) + assert( + len(params_comp_keys) == 0 + or not params_comp_keys.issubset(params_inner_comp_keys) + ) + + if filter_name is not None: + if isinstance(filter_name, str): + filter_name = [filter_name] + + if filter_regex is not None: + if isinstance(filter_regex, str): + filter_regex = [filter_regex] + + for item, comp_name in [ + (params_comp, 'comp'), + (params_inner_comp, 'inner_comp') + ]: + for p in item: + assert p._owner._owner is item[p], (p.name, comp_name) + + matches = True + try: + matches = matches and p.name in filter_name + except TypeError: + pass + + try: + for pattern in filter_regex: + matches = matches or re.match(pattern, p.name) + except TypeError: + pass + + assert matches + + for p in unknown_param_names: + assert p not in item, (p.name, comp_name) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 3713f22c81d..71cdcffdce1 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -9,10 +9,10 @@ from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement, BackPropagation from psyneulink.core.components.functions.nonstateful.optimizationfunctions import GridSearch -from psyneulink.core.components.functions.stateful.integratorfunctions import \ - AdaptiveIntegrator, DriftDiffusionIntegrator, IntegratorFunction, SimpleIntegrator from psyneulink.core.components.functions.nonstateful.transferfunctions import \ Linear, Logistic, INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM +from 
psyneulink.core.components.functions.stateful.integratorfunctions import \ + AdaptiveIntegrator, DriftDiffusionIntegrator, IntegratorFunction, SimpleIntegrator from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import \ @@ -30,7 +30,7 @@ from psyneulink.core.globals.context import Context from psyneulink.core.globals.keywords import \ ADDITIVE, ALLOCATION_SAMPLES, BEFORE, DEFAULT, DISABLE, INPUT_PORT, INTERCEPT, LEARNING_MECHANISMS, \ - LEARNED_PROJECTIONS, \ + LEARNED_PROJECTIONS, RANDOM_CONNECTIVITY_MATRIX, \ NAME, PROJECTIONS, RESULT, OBJECTIVE_MECHANISM, OUTPUT_MECHANISM, OVERRIDE, SLOPE, TARGET_MECHANISM, VARIANCE from psyneulink.core.scheduling.condition import AtTimeStep, AtTrial, Never, TimeInterval from psyneulink.core.scheduling.condition import EveryNCalls @@ -362,6 +362,65 @@ def test_add_proj_weights_only(self): assert np.allclose(B.parameters.value.get(comp), [[22.4, 29.6]]) assert np.allclose(proj.matrix.base, weights) + test_args = [(None, ([1],[1],[1],[1])), + ('list', ([[0.60276338]],[[0.64589411]],[[0.96366276]])), + ('set', ([[0.60276338]],[[0.64589411]],[[0.96366276]]))] + @pytest.mark.parametrize('projs, expected_matrices', test_args, ids=[x[0] for x in test_args]) + def test_add_multiple_projections_for_nested_compositions(self, projs, expected_matrices): + """Test automatic creation and explicit specification of Projections from outer Composition to multiple + Nodes of a nested Composition, and between Nodes of nested Compositions. + """ + + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + D = ProcessingMechanism(name='D') + E = ProcessingMechanism(name='E') + F = ProcessingMechanism(name='F') + X = ProcessingMechanism(name='INPUT NODE') + M = ProcessingMechanism(name='MIDDLE NODE') + Y = ProcessingMechanism(name='OUTPUT NODE') + if projs is 'list': + iprojs = [MappingProjection(sender=C, receiver=D, matrix=RANDOM_CONNECTIVITY_MATRIX)] + oprojs = [MappingProjection(sender=X, receiver=A, matrix=RANDOM_CONNECTIVITY_MATRIX), + MappingProjection(sender=X, receiver=M, matrix=RANDOM_CONNECTIVITY_MATRIX)] + elif projs is 'set': + iprojs = {MappingProjection(sender=C, receiver=D, matrix=RANDOM_CONNECTIVITY_MATRIX)} + oprojs = {MappingProjection(sender=X, receiver=A, matrix=RANDOM_CONNECTIVITY_MATRIX), + MappingProjection(sender=X, receiver=M, matrix=RANDOM_CONNECTIVITY_MATRIX)} + + comp1 = Composition(pathways=[A,B,C], name='COMP 1') + comp2 = Composition(pathways=[D,E,F], name='COMP 2') + + if not projs: + ipway = [comp1, comp2] + else: + ipway = [comp1, iprojs, comp2] + mcomp = Composition(pathways=[ipway,M], name='MIDDLE COMPOSITION') + + if not projs: + opway = [[X, mcomp, Y, C]] + else: + opway = [[X, oprojs, mcomp, Y, C]] + ocomp = Composition(pathways=opway, name='OUTER COMPOSITION') + + # gv = ocomp.show_graph(output_fmt=source, show_CIM=True, show_node_structure=True) + # assert gv = expected + if not projs: + assert (comp1.output_CIM.output_ports[0].efferents[0].matrix.base == + comp2.input_CIM.input_ports[0].path_afferents[0].matrix.base == expected_matrices[0]) + assert (X.output_ports[0].efferents[0].matrix.base == + mcomp.input_CIM.input_ports[0].path_afferents[0].matrix.base == expected_matrices[1]) + assert (X.output_ports[0].efferents[1].matrix.base == + 
mcomp.input_CIM.input_ports[1].path_afferents[0].matrix.base == expected_matrices[2]) + else: + assert np.allclose(comp1.output_CIM.output_ports[0].efferents[0].matrix.base, expected_matrices[0]) + assert np.allclose(comp2.input_CIM.input_ports[0].path_afferents[0].matrix.base, expected_matrices[0]) + assert np.allclose(X.output_ports[0].efferents[0].matrix.base, expected_matrices[1]) + assert np.allclose(mcomp.input_CIM.input_ports[0].path_afferents[0].matrix.base, expected_matrices[1]) + assert np.allclose(X.output_ports[0].efferents[1].matrix.base, expected_matrices[2]) + assert np.allclose(mcomp.input_CIM.input_ports[1].path_afferents[0].matrix.base, expected_matrices[2]) + def test_add_linear_processing_pathway_with_noderole_specified_in_tuple(self): comp = Composition() A = TransferMechanism(name='composition-pytests-A') @@ -2277,6 +2336,7 @@ def test_exact_time(self): assert comp.scheduler.execution_list[comp.default_execution_id] == [{A, B}] assert comp.scheduler.execution_timestamps[comp.default_execution_id][0].absolute == 1 * pnl._unit_registry.ms + class TestGetMechanismsByRole: def test_multiple_roles(self): @@ -4131,7 +4191,7 @@ def test_invalid_projection_deletion_when_nesting_comps(self): pnl.OptimizationControlMechanism( agent_rep=ocomp, state_features=[oa.input_port], - # state_feature_function=pnl.Buffer(history=2), + # state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=ocomp_objective_mechanism, function=pnl.GridSearch(direction=pnl.MINIMIZE), @@ -4151,7 +4211,7 @@ def test_invalid_projection_deletion_when_nesting_comps(self): pnl.OptimizationControlMechanism( agent_rep=icomp, state_features=[ia.input_port], - # state_feature_function=pnl.Buffer(history=2), + # state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=icomp_objective_mechanism, function=pnl.GridSearch(direction=pnl.MAXIMIZE), @@ -4298,82 +4358,82 @@ def test_combine_two_disjunct_trees(self): assert myMech6 in terminals def test_combine_two_overlapping_trees(self): - # Goal: - - # Mech1 -- - # --> Mech3 -- - # Mech2 -- --> Mech5 - # Mech4 -- - - # create first composition ----------------------------------------------- - - # Mech1 -- - # --> Mech3 - # Mech2 -- - - tree1 = Composition() - - myMech1 = TransferMechanism(name="myMech1") - myMech2 = TransferMechanism(name="myMech2") - myMech3 = TransferMechanism(name="myMech3") - myMech4 = TransferMechanism(name="myMech4") - myMech5 = TransferMechanism(name="myMech5") - - tree1.add_node(myMech1) - tree1.add_node(myMech2) - tree1.add_node(myMech3) - tree1.add_projection(MappingProjection(sender=myMech1, receiver=myMech3), myMech1, myMech3) - tree1.add_projection(MappingProjection(sender=myMech2, receiver=myMech3), myMech2, myMech3) - - # validate first composition --------------------------------------------- - - tree1._analyze_graph() - origins = tree1.get_nodes_by_role(NodeRole.ORIGIN) - assert len(origins) == 2 - assert myMech1 in origins - assert myMech2 in origins - terminals = tree1.get_nodes_by_role(NodeRole.TERMINAL) - assert len(terminals) == 1 - assert myMech3 in terminals - - # create second composition ---------------------------------------------- - - # Mech3 -- - # --> Mech5 - # Mech4 -- - - tree2 = Composition() - tree2.add_node(myMech3) - tree2.add_node(myMech4) - tree2.add_node(myMech5) - tree2.add_projection(MappingProjection(sender=myMech3, receiver=myMech5), myMech3, myMech5) - tree2.add_projection(MappingProjection(sender=myMech4, receiver=myMech5), myMech4, myMech5) - - # 
validate second composition ---------------------------------------------- - - tree2._analyze_graph() - origins = tree2.get_nodes_by_role(NodeRole.ORIGIN) - assert len(origins) == 2 - assert myMech3 in origins - assert myMech4 in origins - terminals = tree2.get_nodes_by_role(NodeRole.TERMINAL) - assert len(terminals) == 1 - assert myMech5 in terminals - - # combine the compositions ------------------------------------------------- - - tree1.add_pathway(tree2) - tree1._analyze_graph() - # no need for a projection connecting the two compositions because they share myMech3 - - origins = tree1.get_nodes_by_role(NodeRole.ORIGIN) - assert len(origins) == 3 - assert myMech1 in origins - assert myMech2 in origins - assert myMech4 in origins - terminals = tree1.get_nodes_by_role(NodeRole.TERMINAL) - assert len(terminals) == 1 - assert myMech5 in terminals + # Goal: + + # Mech1 -- + # --> Mech3 -- + # Mech2 -- --> Mech5 + # Mech4 -- + + # create first composition ----------------------------------------------- + + # Mech1 -- + # --> Mech3 + # Mech2 -- + + tree1 = Composition() + + myMech1 = TransferMechanism(name="myMech1") + myMech2 = TransferMechanism(name="myMech2") + myMech3 = TransferMechanism(name="myMech3") + myMech4 = TransferMechanism(name="myMech4") + myMech5 = TransferMechanism(name="myMech5") + + tree1.add_node(myMech1) + tree1.add_node(myMech2) + tree1.add_node(myMech3) + tree1.add_projection(MappingProjection(sender=myMech1, receiver=myMech3), myMech1, myMech3) + tree1.add_projection(MappingProjection(sender=myMech2, receiver=myMech3), myMech2, myMech3) + + # validate first composition --------------------------------------------- + + tree1._analyze_graph() + origins = tree1.get_nodes_by_role(NodeRole.ORIGIN) + assert len(origins) == 2 + assert myMech1 in origins + assert myMech2 in origins + terminals = tree1.get_nodes_by_role(NodeRole.TERMINAL) + assert len(terminals) == 1 + assert myMech3 in terminals + + # create second composition ---------------------------------------------- + + # Mech3 -- + # --> Mech5 + # Mech4 -- + + tree2 = Composition() + tree2.add_node(myMech3) + tree2.add_node(myMech4) + tree2.add_node(myMech5) + tree2.add_projection(MappingProjection(sender=myMech3, receiver=myMech5), myMech3, myMech5) + tree2.add_projection(MappingProjection(sender=myMech4, receiver=myMech5), myMech4, myMech5) + + # validate second composition ---------------------------------------------- + + tree2._analyze_graph() + origins = tree2.get_nodes_by_role(NodeRole.ORIGIN) + assert len(origins) == 2 + assert myMech3 in origins + assert myMech4 in origins + terminals = tree2.get_nodes_by_role(NodeRole.TERMINAL) + assert len(terminals) == 1 + assert myMech5 in terminals + + # combine the compositions ------------------------------------------------- + + tree1.add_pathway(tree2) + tree1._analyze_graph() + # no need for a projection connecting the two compositions because they share myMech3 + + origins = tree1.get_nodes_by_role(NodeRole.ORIGIN) + assert len(origins) == 3 + assert myMech1 in origins + assert myMech2 in origins + assert myMech4 in origins + terminals = tree1.get_nodes_by_role(NodeRole.TERMINAL) + assert len(terminals) == 1 + assert myMech5 in terminals # MODIFIED 5/8/20 OLD: ELIMINATE SYSTEM: # FIX SHOULD THESE BE RE-WRITTEN WITH STANDARD NESTED COMPOSITIONS AND PATHWAYS? 
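[Editor's note, not part of the patch: in test_add_multiple_projections_for_nested_compositions, added earlier in this file, the branches compare a parametrized string with identity ("if projs is 'list':", "elif projs is 'set':"). That only passes because CPython happens to intern those literals, and Python 3.8+ emits a SyntaxWarning for "is" with a literal; equality is the intended comparison. A minimal sketch of the distinction, using made-up values:

    # prefer equality for string flags; identity depends on interning details
    projs = ''.join(['li', 'st'])   # value 'list', possibly a distinct str object
    assert projs == 'list'          # always True: compares values
    # "projs is 'list'" may be False, and triggers a SyntaxWarning on Python >= 3.8
]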
@@ -4653,6 +4713,82 @@ def test_four_level_nested_dual_OCM_control(self): result = c_lvl0.run([5]) assert result == [4500] + @pytest.mark.parametrize('nesting', ("unnested", "nested")) + def test_partially_overlapping_local_and_control_mech_control_specs_in_unnested_and_nested_comp(self, nesting): + pnl.clear_registry() + samples = np.arange(0.1, 1.01, 0.3) + Input = pnl.TransferMechanism(name='Input') + reward = pnl.TransferMechanism(output_ports=[pnl.RESULT, pnl.MEAN, pnl.VARIANCE], + name='reward', + ) + Decision = pnl.DDM(function=pnl.DriftDiffusionAnalytical(drift_rate=(1.0, + pnl.ControlProjection(function=pnl.Linear, + control_signal_params={ + pnl.ALLOCATION_SAMPLES: samples, + })), + threshold=(1.0, + pnl.ControlProjection(function=pnl.Linear, + control_signal_params={ + pnl.ALLOCATION_SAMPLES: samples, + })), + noise=0.5, + starting_point=0, + t0=0.45), + output_ports=[pnl.DECISION_VARIABLE, + pnl.RESPONSE_TIME, + pnl.PROBABILITY_UPPER_THRESHOLD], + name='Decision') + Response = pnl.DDM(function=pnl.DriftDiffusionAnalytical(drift_rate=1.0, + threshold=1.0, + noise=0.5, + starting_point=0, + t0=0.45), + output_ports=[pnl.DECISION_VARIABLE, + pnl.RESPONSE_TIME, + pnl.PROBABILITY_UPPER_THRESHOLD], + name='Response') + + icomp = pnl.Composition(name="EVC (inner comp)", retain_old_simulation_data=True) + icomp.add_node(reward, required_roles=[pnl.NodeRole.OUTPUT]) + icomp.add_node(Decision, required_roles=[pnl.NodeRole.OUTPUT]) + icomp.add_node(Response, required_roles=[pnl.NodeRole.OUTPUT]) + icomp.add_linear_processing_pathway([Input, pnl.IDENTITY_MATRIX, Decision, Response]) + if nesting == 'nested': + comp = Composition(nodes=icomp, name="Outer Composition") + else: + comp = icomp + ocm=OptimizationControlMechanism( + agent_rep=comp, + monitor_for_control=[Decision.output_ports[pnl.DECISION_VARIABLE], + Decision.output_ports[pnl.RESPONSE_TIME]], + num_estimates=1, + function=GridSearch, + control_signals=[ + ControlSignal(modulates=('drift_rate',Decision), # OVERLAPS WITH CONTROL SPEC ON Decision + allocation_samples=[1,2]), + ControlSignal(modulates=('threshold',Response), # ADDS CONTROL SPEC FOR Response + allocation_samples=[1,2]), + ] + ) + comp.add_controller(ocm) + + assert len(comp.controller.input_ports[pnl.OUTCOME].path_afferents) == 2 + if nesting == 'nested': + # All Projections to controller's OUTCOME InputPort should be from input_CIM + assert all(isinstance(comp.controller.input_ports[pnl.OUTCOME].path_afferents[i].sender.owner, + pnl.CompositionInterfaceMechanism) for i in range(2)) + + assert len(comp.controller.control_signals) == 4 # Should be 4: Decision threshold (spec'd locally on mech) + # Decision drift_rate (spec'd on mech and OCM) + # Response threshold (spec'd on OCM) + # RANDOMIZATION + ctl_sig_names = ['Decision[drift_rate] ControlSignal', 'Decision[threshold] ControlSignal', + 'Response[threshold] ControlSignal', 'RANDOMIZATION_CONTROL_SIGNAL'] + assert all([name in ctl_sig_names for name in comp.controller.control_signals.names]) + if nesting == 'nested': + # All of the controller's ControlSignals should project to the ParameterCIM for the nested comp + assert all(isinstance(comp.controller.control_signals[i].efferents[0].receiver.owner, + pnl.CompositionInterfaceMechanism) for i in range(4)) class TestOverloadedCompositions: def test_mechanism_different_inputs(self): @@ -5705,7 +5841,11 @@ def test_two_origins(self): assert A.value == [[1.0]] assert B.value == [[1.0]] - assert comp.shadows[A] == [B] + + # Since B is both an INPUT Node and also 
shadows A, it should have two afferent Projections, + # one from it own OutputPort of the Composition's input_CIM, and another from the one for A + # assert len(B.path_afferents)==2 + # assert B.input_port.path_afferents[1].sender is A.input_port.path_afferents[0].sender C = ProcessingMechanism(name='C') comp.add_linear_processing_pathway([C, A]) @@ -5720,7 +5860,7 @@ def test_two_origins(self): assert len(B.path_afferents) == 1 assert B.path_afferents[0].sender.owner == C - def test_two_origins_two_input_ports(self): + def test_shadow_internal_projectionstest_two_origins_two_input_ports(self): comp = Composition(name='comp') A = ProcessingMechanism(name='A', function=Linear(slope=2.0)) @@ -5732,15 +5872,20 @@ def test_two_origins_two_input_ports(self): assert A.value == [[2.0]] assert np.allclose(B.value, [[1.0], [2.0]]) - assert comp.shadows[A] == [B] + + assert len(B.input_ports)==2 + assert len(B.input_ports[0].path_afferents)==1 + assert len(B.input_ports[1].path_afferents)==1 + assert B.input_ports[0].path_afferents[0].sender is A.input_ports[0].path_afferents[0].sender + assert B.input_ports[1].path_afferents[0].sender is A.output_ports[0] C = ProcessingMechanism(name='C') comp.add_linear_processing_pathway([C, A]) comp.run(inputs={C: 1.5}) assert A.value == [[3.0]] - assert np.allclose(B.value, [[1.5], [3.0]]) assert C.value == [[1.5]] + assert np.allclose(B.value, [[1.5], [3.0]]) # Since B is shadowing A, its old projection from the CIM should be deleted, # and a new projection from C should be added @@ -5773,12 +5918,59 @@ def test_shadow_internal_projections(self): comp.add_linear_processing_pathway([A2, B]) comp.run(inputs={A: [[1.0]], A2: [[1.0]]}) - assert A.value == [[1.0]] assert A2.value == [[1.0]] assert B.value == [[2.0]] assert C.value == [[2.0]] + + _test_shadow_nested_nodes_arg =\ + [ + ('shadow_nodes_one_and_two_levels_deep', 0), + ('shadow_nested_internal_node', 1), + ], + + @pytest.mark.parametrize( + 'condition', + ['shadow_nodes_one_and_two_levels_deep', + 'shadow_nested_internal_node'], + ) + def test_shadow_nested_nodes(self, condition): + + I = ProcessingMechanism(name='I') + icomp = Composition(nodes=I, name='INNER COMP') + + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + mcomp = Composition(pathways=[[A,B,C],icomp], name='MIDDLE COMP') + + if condition == 'shadow_nodes_one_and_two_levels_deep': + + # Confirm that B's shadow of I comes from the same ocomp_input_CIM that serves I + O = ProcessingMechanism(name='O',input_ports=[I.input_port, A.input_port]) + ocomp = Composition(nodes=[mcomp,O], name='OUTER COMP') + ocomp._analyze_graph() + assert len(O.afferents)==2 + assert O.input_ports[0].shadow_inputs.owner is I + receiver = icomp.input_CIM.port_map[I.input_port][0] + receiver = receiver.path_afferents[0].sender.owner.port_map[receiver][0] + assert O.input_ports[0].path_afferents[0].sender is \ + ocomp.input_CIM.port_map[receiver][1] + + # Confirm that B's shadow of A comes from the same ocomp_input_CIM that serves A + assert O.input_ports[1].shadow_inputs.owner is A + assert O.input_ports[1].path_afferents[0].sender is \ + mcomp.input_CIM.port_map[A.input_port][0].path_afferents[0].sender + + elif condition == 'shadow_nested_internal_node': + with pytest.raises(CompositionError) as err: + O = ProcessingMechanism(name='O',input_ports=[B.input_port]) + ocomp = Composition(nodes=[mcomp,O], name='OUTER COMP') + assert 'Attempt to shadow the input to a node (B) in a nested Composition of OUTER COMP ' 
\ + 'that is not an INPUT Node of that Composition is not currently supported.' \ + in err.value.error_value + def test_monitor_input_ports(self): comp = Composition(name='comp') @@ -6156,6 +6348,149 @@ def test_INTERNAL(self): assert comp.get_nodes_by_role(NodeRole.INTERNAL) == [B] + def test_input_labels_and_results_by_node_and_no_orphaning_of_nested_output_nodes(self): + """ + Test that nested Composition with two outputs, one of which Projects to a node in the outer Composition is, + by virtue of its other output, still assigned as an OUTPUT Node of the outer Composition + Also test get_input_format and get_results_by_nodes methods + """ + input_labels_dict = {0:{'red':0, 'green':1}} + output_labels_dict = {0:{'red':0, 'green':1}} + A = ProcessingMechanism(name='A', input_labels=input_labels_dict) + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + icomp = Composition(pathways=[[A,B,C]], name='INNER COMP') + + X = ProcessingMechanism(name='X') + Y = ProcessingMechanism(name='Y') + Z = ProcessingMechanism(name='Z', output_labels=output_labels_dict) + mcomp = Composition(pathways=[[X,Y,Z],icomp], name='MIDDLE COMP') + + Q = ProcessingMechanism(name='Q', input_labels=input_labels_dict) + O = ProcessingMechanism(name='O', input_ports=[Z]) + ocomp = Composition(name='OUTER COMP', nodes=[O, mcomp,Q]) + + len(ocomp.output_values)==3 + result = ocomp.run(inputs={mcomp:[[0],[0]]}) + assert len(result)==4 + + input_format = ocomp.get_input_format() + assert repr(input_format) == '\'{\\n\\tMIDDLE COMP: [[0.0],[0.0]],\\n\\tQ: [[0.0]]\\n}\'' + input_format = ocomp.get_input_format(num_trials=3, use_labels=True) + assert repr(input_format) == '"{\\n\\tMIDDLE COMP: [ [[[0.0]],[\'red\']], [[[0.0]],[\'green\']], [[[0.0]],[\'red\']] ],\\n\\tQ: [ [\'red\'], [\'green\'], [\'red\'] ]\\n}"' + input_format = ocomp.get_input_format(num_trials=2, show_nested_input_nodes=True) + assert input_format == '\nInputs to (nested) INPUT Nodes of OUTER COMP for 2 trials:\n\tMIDDLE COMP: \n\t\tX: [ [[0.0]], [[0.0]] ]\n\t\tINNER COMP: \n\t\t\tA: [ [[0.0]], [[0.0]] ]\n\tQ: [ [[0.0]], [[0.0]] \n\nFormat as follows for inputs to run():\n{\n\tMIDDLE COMP: [ [[0.0],[0.0]], [[0.0],[0.0]] ],\n\tQ: [ [[0.0]], [[0.0]] ]\n}' + input_format = ocomp.get_input_format(num_trials=2, show_nested_input_nodes=True, use_labels=True) + assert input_format == "\nInputs to (nested) INPUT Nodes of OUTER COMP for 2 trials:\n\tMIDDLE COMP: \n\t\tX: [ [[0.0]], [[0.0]] ]\n\t\tINNER COMP: \n\t\t\tA: [ ['red'], ['green'] ]\n\tQ: [ ['red'], ['green'] \n\nFormat as follows for inputs to run():\n{\n\tMIDDLE COMP: [ [[0.0],[0.0]], [[0.0],[0.0]] ],\n\tQ: [ [[0.0]], [[0.0]] ]\n}" + + result = ocomp.run(inputs={mcomp:[[.2],['green']], Q:[4.6]}) + assert result == [[0.2], [0.2], [1.],[4.6]] + results_by_node = ocomp.get_results_by_nodes() + assert results_by_node[O] == [0.2] + assert results_by_node[Z] == [0.2] + assert results_by_node[C] == [1.0] + assert results_by_node[Q] == [4.6] + results_by_node = ocomp.get_results_by_nodes(use_names=True) + assert repr(results_by_node) == '{\'O\': [0.2], \'Z\': [0.2], \'C\': [1.0], \'Q\': [4.6]}' + results_by_node = ocomp.get_results_by_nodes(use_names=True, use_labels=True) + assert repr(results_by_node) == '{\'O\': [[0.2]], \'Z\': [\'red\'], \'C\': [[1.0]], \'Q\': [[4.6]]}' + results_by_node = ocomp.get_results_by_nodes(nodes=[Q, Z]) + assert repr(results_by_node) == '{(ProcessingMechanism Z): [0.2], (ProcessingMechanism Q): [4.6]}' + results_by_node = ocomp.get_results_by_nodes(nodes=Q, 
use_names=True) + assert repr(results_by_node) == '{\'Q\': [4.6]}' + results_by_node = ocomp.get_results_by_nodes(nodes=Z, use_labels=True) + assert repr(results_by_node) == '{(ProcessingMechanism Z): [\'red\']}' + + label_not_in_dict_error_msg = '"Inappropriate use of \'purple\' as a stimulus for A in MIDDLE COMP: ' \ + 'it is not a label in its input_labels_dict."' + with pytest.raises(CompositionError) as error_text: + ocomp.run(inputs={mcomp:[[0],['purple']],Q:['red']}) + assert label_not_in_dict_error_msg in str(error_text.value) + + no_label_dict_error_msg = '"Inappropriate use of str (\'red\') as a stimulus for X in MIDDLE COMP: ' \ + 'it does not have an input_labels_dict."' + with pytest.raises(CompositionError) as error_text: + ocomp.run(inputs={mcomp:[['red'],['red']],Q:['red']}) + assert no_label_dict_error_msg in str(error_text.value) + + no_such_node_error_msg = '"Nodes specified in get_results_by_nodes() method not found in OUTER COMP ' \ + 'nor any Compositions nested within it: [\'N\']"' + with pytest.raises(CompositionError) as error_text: + ocomp.get_results_by_nodes(nodes=['N']) + assert no_such_node_error_msg in str(error_text.value) + + + + def test_unnested_PROBE(self): + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + comp = Composition(pathways=[A,(B, NodeRole.PROBE), C], name='COMP') + assert B.output_port in comp.output_CIM.port_map + + params = [ # id allow_probes include_probes_in_output err_msg + ( + "allow_probes_True", True, False, None + ), + ( + "allow_probes_True", True, True, None + ), + ( + "allow_probes_False", False, False, + "B found in nested Composition of OUTER COMP (MIDDLE COMP) but without required NodeRole.OUTPUT." + ), + ( + "allow_probes_CONTROL", "CONTROL", True, + "B found in nested Composition of OUTER COMP (MIDDLE COMP) but without required NodeRole.OUTPUT." 
+ ) + ] + @pytest.mark.parametrize('id, allow_probes, include_probes_in_output, err_msg', params, ids=[x[0] for x in params]) + def test_nested_PROBES(self, id, allow_probes, include_probes_in_output, err_msg): + """Test use of allow_probes, include_probes_in_output and orphaned output from nested comp""" + + A = ProcessingMechanism(name='A') + B = ProcessingMechanism(name='B') + C = ProcessingMechanism(name='C') + icomp = Composition(pathways=[[A,B,C]], name='INNER COMP') + + X = ProcessingMechanism(name='X') + Y = ProcessingMechanism(name='Y') + Z = ProcessingMechanism(name='Z') + mcomp = Composition(pathways=[[X,Y,Z],icomp], name='MIDDLE COMP') + + O = ProcessingMechanism(name='O', + input_ports=[B, Y] + ) + + if not err_msg: + ocomp = Composition(name='OUTER COMP', + # node=[0,mcomp], # <- CRASHES + nodes=[mcomp,O], + allow_probes=allow_probes, + include_probes_in_output=include_probes_in_output + ) + # ocomp.show_graph(show_cim=True, show_node_structure=True) + + assert B.output_port in icomp.output_CIM.port_map + # assert B.output_port in mcomp.output_CIM.port_map + assert Y.output_port in mcomp.output_CIM.port_map + if include_probes_in_output is False: + assert len(ocomp.output_values)==4 # Should only be outputs from mcomp (C, Z) and O + result = ocomp.run(inputs={mcomp:[[0],[0]]}) + assert len(result)==4 # Should only be outputs from mcomp (C, Z) and O + elif include_probes_in_output is True: + assert len(ocomp.output_values)==6 # Outputs from mcomp (C and Z) and O (from B and Y) + result = ocomp.run(inputs={mcomp:[[0],[0]]}) # This tests that outputs from mcomp (C and Z) are included + assert len(result)==6 # even though mcomp also projects to O (for B and Y) + else: + with pytest.raises(CompositionError) as err: + ocomp = Composition(name='OUTER COMP', + nodes=[mcomp,O], + allow_probes=allow_probes, + include_probes_in_output=include_probes_in_output) + ocomp._analyze_graph() + assert err.value.error_value == err_msg + def test_two_node_cycle(self): A = TransferMechanism() B = TransferMechanism() diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 6d4acfe7bec..da45a9d2d8f 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -7,6 +7,7 @@ from psyneulink.core.globals.keywords import ALLOCATION_SAMPLES, PROJECTIONS from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.sampleiterator import SampleIterator, SampleIteratorError, SampleSpec +from psyneulink.core.globals.utilities import _SeededPhilox class TestControlSpecification: @@ -38,7 +39,7 @@ def test_add_node_with_control_specified_then_add_controller(self): comp.add_controller(ctl_mech) assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] - assert np.allclose(comp.controller.control[0].allocation_samples.base(), + assert np.allclose(comp.controller.control[0].allocation_samples(), [0.1, 0.4, 0.7000000000000001, 1.0000000000000002]) def test_add_controller_in_comp_constructor_then_add_node_with_control_specified(self): @@ -57,9 +58,10 @@ def test_add_controller_in_comp_constructor_then_add_node_with_control_specified ctl_mech = pnl.ControlMechanism() comp = pnl.Composition(controller=ctl_mech) comp.add_node(ddm) + comp._analyze_graph() assert comp.controller.control[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert 
ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert np.allclose(comp.controller.control[0].allocation_samples.base(), + assert np.allclose(comp.controller.control[0].allocation_samples(), [0.1, 0.4, 0.7000000000000001, 1.0000000000000002]) def test_redundant_control_spec_add_node_with_control_specified_then_controller_in_comp_constructor(self): @@ -76,7 +78,7 @@ def test_redundant_control_spec_add_node_with_control_specified_then_controller_ comp.add_controller(pnl.ControlMechanism(control_signals=("drift_rate", ddm))) assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert comp.controller.control_signals[0].allocation_samples.base is None + assert comp.controller.control_signals[0].allocation_samples is None def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node_with_control_specified(self): # First create Composition with controller that has HAS control specification, @@ -91,7 +93,7 @@ def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node comp.add_node(ddm) assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert comp.controller.control_signals[0].allocation_samples.base is None + assert comp.controller.control_signals[0].allocation_samples is None def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node_with_alloc_samples_specified(self): # First create Composition with controller that has HAS control specification, @@ -107,7 +109,7 @@ def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node comp.add_node(ddm) assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert np.allclose(comp.controller.control[0].allocation_samples.base(), [0.2, 0.5, 0.8]) + assert np.allclose(comp.controller.control[0].allocation_samples(), [0.2, 0.5, 0.8]) def test_deferred_init(self): # Test to insure controller works the same regardless of whether it is added to a composition before or after @@ -146,7 +148,7 @@ def test_deferred_init(self): comp.add_controller(controller=pnl.OptimizationControlMechanism( agent_rep=comp, state_features=[Input.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), objective_mechanism=pnl.ObjectiveMechanism( function=pnl.LinearCombination(operation=pnl.PRODUCT), monitor=[reward, @@ -266,110 +268,236 @@ def test_partial_deferred_init(self): ]) ) - text = 'The controller of ocomp has been specified to project to deferred, but deferred is ' \ - 'not in ocomp or any of its nested Compositions. This projection will be deactivated ' \ - 'until deferred is added to ocomp in a compatible way.' 
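The hunks above swap the old `.base()` access for a direct call when reading `allocation_samples`, and rename the OCM's `state_feature_function` argument to the plural `state_feature_functions`. A minimal, hypothetical sketch of the updated usage (not taken from the diff; the mechanism name and sample values are illustrative, and the calls assume the PsyNeuLink API exactly as exercised by these tests):

```python
import psyneulink as pnl

A = pnl.TransferMechanism(name='A')
comp = pnl.Composition(pathways=[A], name='comp')

comp.add_controller(pnl.OptimizationControlMechanism(
    agent_rep=comp,
    state_features=[A.input_port],
    state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5),   # renamed (now plural) argument
    objective_mechanism=pnl.ObjectiveMechanism(monitor=[A]),
    function=pnl.GridSearch(),
    control_signals=pnl.ControlSignal(modulates=(pnl.SLOPE, A),
                                      allocation_samples=[0.1, 0.4, 0.7, 1.0])))

# The samples are now read by calling the parameter directly
# (previously: comp.controller.control[0].allocation_samples.base())
samples = comp.controller.control[0].allocation_samples()
```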
- with pytest.warns(UserWarning, match=text): - # ocomp.show_graph(show_controller=True, show_cim=True) - # results = ocomp.run([5]) - result = ocomp.run({initial_node_a: [1]}) - - # result = 5, the input (1) multiplied by the value of the ControlSignal projecting to Node "ia" - # Control Signal "ia": Maximizes over the search space consisting of ints 1-5 - # Control Signal "deferred_node": disabled - - assert result == [[5]] + expected_text_1 = f"{ocomp.controller.name}, being used as controller for " \ + f"model-based optimization of {ocomp.name}, has 'state_features' specified " + expected_text_2 = f"that are missing from the Composition or any nested within it" + with pytest.raises(pnl.OptimizationControlMechanismError) as error_text: + ocomp.run({initial_node_a: [1]}) + error_text = error_text.value.error_value + assert expected_text_1 in error_text and expected_text_2 in error_text ocomp.add_linear_processing_pathway([deferred_node, initial_node_b]) - result = ocomp.run({ initial_node_a: [1], deferred_node: [1] }) - # result = 10, the sum of the input (1) multiplied by the value of the ControlSignals projecting, respectively, to Node "ia" and Node "deferred_node" # Control Signal "ia": Maximizes over the search space consisting of ints 1-5 # Control Signal "deferred_node": Maximizes over the search space consisting of ints 1-5 - assert result == [[10]] - def test_deferred_objective_mech(self): - initial_node = pnl.TransferMechanism(name='initial_node') - deferred_node = pnl.ProcessingMechanism(name='deferred') - ocomp = pnl.Composition(name='ocomp', - pathways=[initial_node], - controller_mode=pnl.BEFORE) - - initial_node_control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, initial_node)], - variable=1.0, - intensity_cost_function=pnl.Linear(slope=0.0), - allocation_samples=pnl.SampleSpec(start=1.0, - stop=5.0, - num=5)) - - ocomp.add_controller( - pnl.OptimizationControlMechanism( - agent_rep=ocomp, - state_features=[initial_node.input_port], - name="Controller", - objective_mechanism=pnl.ObjectiveMechanism( - monitor=deferred_node.output_port, - function=pnl.SimpleIntegrator, - name="oController Objective Mechanism" - ), - function=pnl.GridSearch(direction=pnl.MAXIMIZE), - control_signals=[ - initial_node_control_signal - ]) - ) + # FIX: DEPRACATE THIS TEST - IT ALLOWS A COMPOSITION TO EXECUTE WITH A BAD MONITOR FOR CONTROL SPECIFICATION + # SUPERCEDED BY test_args_specific_to_ocm outcome_input_ports WHICH TESTS FOR THIS + # def test_deferred_objective_mech(self): + # initial_node = pnl.TransferMechanism(name='initial_node') + # deferred_node = pnl.ProcessingMechanism(name='deferred') + # ocomp = pnl.Composition(name='ocomp', + # pathways=[initial_node], + # controller_mode=pnl.BEFORE) + # + # initial_node_control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, initial_node)], + # variable=1.0, + # intensity_cost_function=pnl.Linear(slope=0.0), + # allocation_samples=pnl.SampleSpec(start=1.0, + # stop=5.0, + # num=5)) + # + # ocomp.add_controller( + # pnl.OptimizationControlMechanism( + # agent_rep=ocomp, + # state_features=[initial_node.input_port], + # name="Controller", + # objective_mechanism=pnl.ObjectiveMechanism( + # monitor=deferred_node.output_port, + # function=pnl.SimpleIntegrator, + # name="oController Objective Mechanism" + # ), + # function=pnl.GridSearch(direction=pnl.MAXIMIZE), + # control_signals=[ + # initial_node_control_signal + # ]) + # ) + # + # text = 'The controller of ocomp has a specification that includes the '\ + # 'Mechanism oController 
Objective Mechanism, but oController '\ + # 'Objective Mechanism is not in ocomp or any of its nested Compositions. '\ + # 'This Mechanism will be deactivated until oController Objective Mechanism is '\ + # 'added to ocomp or one of its nested Compositions in a compatible way.' + # with pytest.warns(UserWarning, match=text): + # result = ocomp.run({initial_node: [1]}) + # + # assert result == [[1]] + # # result = 1, the input (1) multiplied by the first value in the SearchSpace of the ControlSignal projecting to + # # initial_node (1) + # + # # The objective Mechanism is disabled because one of its aux components is a projection to + # # deferred_node, which is not currently a member node of the composition. Therefore, the Controller + # # has no basis to determine which set of values it should use for its efferent ControlProjections and + # # simply goes with the first in the search space, which is 1. + # + # # add deferred_node to the Composition + # ocomp.add_linear_processing_pathway([initial_node, deferred_node]) + # + # # The objective mechanism's aux components are now all legal, so it will be activated on the following run + # result = ocomp.run({initial_node: [[1]]}) + # assert result == [[5]] + # # result = 5, the input (1) multiplied by the value of the ControlSignal projecting to Node "ia" + # # Control Signal "ia": Maximizes over the search space consisting of ints 1-5 + + # id, agent_rep, state_feat, mon_for_ctl, allow_probes, obj_mech err_type, error_msg + params = [ + ("allowable1", + "icomp", "I", "I", True, None, None, None + ), + ("allowable2", + "mcomp", "Ii A", "I B", True, None, None, None + ), + ("state_features_test_internal", + "icomp", "B", "I", True, None, pnl.CompositionError, + "Attempt to shadow the input to a node (B) in a nested Composition of OUTER COMP " + "that is not an INPUT Node of that Composition is not currently supported." + ), + ("state_features_test_not_in_agent_rep", + "icomp", "A", "I", True, None, pnl.OptimizationControlMechanismError, + "OCM, being used as controller for model-based optimization of INNER COMP, has 'state_features' " + "specified (['Shadowed input of A']) that are missing from the Composition or any nested within it." + ), + ("monitor_for_control_test_not_in_agent_rep", + "icomp", "I", "B", True, None, pnl.OptimizationControlMechanismError, + "OCM has 'outcome_ouput_ports' that receive Projections from the following Components " + "that do not belong to its agent_rep (INNER COMP): ['B']." + ), + ("monitor_for_control_with_obj_mech_test", + "icomp", "I", None, True, True, pnl.OptimizationControlMechanismError, + "OCM has 'outcome_ouput_ports' that receive Projections from the following Components " + "that do not belong to its agent_rep (INNER COMP): ['B']." + ), + ("probe_error_test", + "mcomp", "I", "B", False, None, pnl.CompositionError, + "B found in nested Composition of OUTER COMP (MIDDLE COMP) but without " + "required NodeRole.OUTPUT. Try setting 'allow_probes' argument of OCM to 'True'." + ), + ("probe_error_obj_mech_test", + "mcomp", "I", None, False, True, pnl.CompositionError, + "B found in nested Composition of OUTER COMP (MIDDLE COMP) but without required NodeRole.OUTPUT. " + "Try setting 'allow_probes' argument of ObjectiveMechanism for OCM to 'True'." 
+ ) + ] + @pytest.mark.parametrize('id, agent_rep, state_features, monitor_for_control, allow_probes, objective_mechanism, error_type, err_msg', + params, ids=[x[0] for x in params]) + def test_args_specific_to_ocm(self, id, agent_rep, state_features, monitor_for_control, + allow_probes, objective_mechanism, error_type,err_msg): + """Test args specific to OptimizationControlMechanism + - state_feature must be in agent_rep + - monitor_for_control must be in agent_rep, whether specified directly or for ObjectiveMechanism + - allow_probes allows INTERNAL Nodes of nested comp to be monitored, otherwise generates and error + - probes are not included in Composition.results + """ - text = 'The controller of ocomp has a specification that includes the '\ - 'Mechanism oController Objective Mechanism, but oController '\ - 'Objective Mechanism is not in ocomp or any of its nested Compositions. '\ - 'This Mechanism will be deactivated until oController Objective Mechanism is '\ - 'added to ocomp or one of its nested Compositions in a compatible way.' - with pytest.warns(UserWarning, match=text): - result = ocomp.run({initial_node: [1]}) + # FIX: ADD VERSION WITH agent_rep = CompositionFuntionApproximator + # ADD TESTS FOR SEPARATE AND CONCATENATE - assert result == [[1]] - # result = 1, the input (1) multiplied by the first value in the SearchSpace of the ControlSignal projecting to - # initial_node (1) + from psyneulink.core.globals.utilities import convert_to_list - # The objective Mechanism is disabled because one of its aux components is a projection to - # deferred_node, which is not currently a member node of the composition. Therefore, the Controller - # has no basis to determine which set of values it should use for its efferent ControlProjections and - # simply goes with the first in the search space, which is 1. 
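The docstring of `test_args_specific_to_ocm` above lists the membership constraints being checked. As a standalone illustration, here is a hedged sketch of the `state_features_test_not_in_agent_rep` case: the OCM's `agent_rep` is the inner Composition, but a `state_features` entry shadows a node outside it, so adding the controller is expected to fail. Names mirror the test; the behavior is assumed from its expected error message, not verified independently.

```python
import psyneulink as pnl

I = pnl.ProcessingMechanism(name='I')
icomp = pnl.Composition(nodes=I, name='INNER COMP')

A = pnl.ProcessingMechanism(name='A')
B = pnl.ProcessingMechanism(name='B')
C = pnl.ProcessingMechanism(name='C')
mcomp = pnl.Composition(pathways=[[A, B, C], icomp], name='MIDDLE COMP')
ocomp = pnl.Composition(nodes=[mcomp], name='OUTER COMP', allow_probes=True)

try:
    ocm = pnl.OptimizationControlMechanism(
        name='OCM',
        agent_rep=icomp,                  # agent_rep is the inner Composition...
        state_features=[A],               # ...but A lives outside of it
        monitor_for_control=[I],
        function=pnl.GridSearch(),
        control_signals=pnl.ControlSignal(modulates=(pnl.SLOPE, I),
                                          allocation_samples=[10, 20, 30]))
    ocomp.add_controller(ocm)
    ocomp._analyze_graph()
except pnl.OptimizationControlMechanismError as err:
    # expected: "...has 'state_features' specified ... that are missing from the
    # Composition or any nested within it."
    print(err)
```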
+ I = pnl.ProcessingMechanism(name='I') + icomp = pnl.Composition(nodes=I, name='INNER COMP') - # add deferred_node to the Composition - ocomp.add_linear_processing_pathway([initial_node, deferred_node]) + A = pnl.ProcessingMechanism(name='A') + B = pnl.ProcessingMechanism(name='B') + C = pnl.ProcessingMechanism(name='C') + mcomp = pnl.Composition(pathways=[[A,B,C],icomp], + name='MIDDLE COMP') + ocomp = pnl.Composition(nodes=[mcomp], name='OUTER COMP', allow_probes=allow_probes) + + agent_rep = {"mcomp":mcomp, + "icomp":icomp + }[agent_rep] + + state_features = {"I":I, + "Ii A":[I.input_port, A], + "A":A, + "B":B, + }[state_features] + + if monitor_for_control: + monitor_for_control = {"I":I, + "I B":[I, B], + "B":B, + }[monitor_for_control] + + if objective_mechanism: + objective_mechanism = pnl.ObjectiveMechanism(monitor=B) + + if not err_msg: + ocm = pnl.OptimizationControlMechanism(name='OCM', + agent_rep=agent_rep, + state_features=state_features, + monitor_for_control=monitor_for_control, + objective_mechanism=objective_mechanism, + allow_probes=allow_probes, + function=pnl.GridSearch(), + control_signals=pnl.ControlSignal(modulates=(pnl.SLOPE,I), + allocation_samples=[10, 20, 30]) + ) + ocomp.add_controller(ocm) + ocomp._analyze_graph() + if allow_probes and B in convert_to_list(monitor_for_control): + # If this fails, could be due to ordering of ports in ocomp.output_CIM (current assumes probe is on 0) + assert ocomp.output_CIM._sender_is_probe(ocomp.output_CIM.output_ports[0]) + # Affirm that PROBE (included in ocomp's output_ports via its output_CIM + # but is *not* included in Composition.output_values (which is used for Composition.results) + assert len(ocomp.output_values) == len(ocomp.output_ports) - 1 - # The objective mechanism's aux components are now all legal, so it will be activated on the following run - result = ocomp.run({initial_node: [[1]]}) - assert result == [[5]] - # result = 5, the input (1) multiplied by the value of the ControlSignal projecting to Node "ia" - # Control Signal "ia": Maximizes over the search space consisting of ints 1-5 + else: + with pytest.raises(error_type) as err: + ocm = pnl.OptimizationControlMechanism(name='OCM', + agent_rep=agent_rep, + state_features=state_features, + monitor_for_control=monitor_for_control, + objective_mechanism=objective_mechanism, + allow_probes=allow_probes, + function=pnl.GridSearch(), + control_signals=pnl.ControlSignal(modulates=(pnl.SLOPE, + I), + allocation_samples=[10, 20, 30]) + ) + ocomp.add_controller(ocm) + ocomp._analyze_graph() + assert err.value.error_value == err_msg def test_agent_rep_assignement_as_controller_and_replacement(self): mech = pnl.ProcessingMechanism() comp = pnl.Composition(name='comp', - pathways=[mech], - controller=pnl.OptimizationControlMechanism(agent_rep=None, - control_signals=(pnl.SLOPE, mech), - search_space=[1])) + pathways=[mech], + controller=pnl.OptimizationControlMechanism(name="old_ocm", + agent_rep=None, + control_signals=(pnl.SLOPE, mech), + search_space=[1])) assert comp.controller.composition == comp + comp._analyze_graph() + assert comp.controller.state_input_ports[0].shadow_inputs == mech.input_port + assert comp.controller.state_input_ports[0].path_afferents[0].sender == mech.input_port.path_afferents[0].sender assert any(pnl.SLOPE in p_name for p_name in comp.projections.names) assert not any(pnl.INTERCEPT in p_name for p_name in comp.projections.names) + old_ocm = comp.controller - new_ocm = pnl.OptimizationControlMechanism(agent_rep=None, + new_ocm = 
pnl.OptimizationControlMechanism(name='new_ocm', + agent_rep=None, control_signals=(pnl.INTERCEPT, mech), search_space=[1]) - old_ocm = comp.controller comp.add_controller(new_ocm) + comp._analyze_graph() + #Confirm that components of new_ocm have been added assert comp.controller == new_ocm + assert any(pnl.INTERCEPT in p_name for p_name in comp.projections.names) + assert comp.controller.state_input_ports[0].shadow_inputs == mech.input_port + assert comp.controller.state_input_ports[0].path_afferents[0].sender == mech.input_port.path_afferents[0].sender + + # Confirm all components of old_ocm have been removed assert old_ocm.composition is None + assert old_ocm.state_input_ports[0].path_afferents == [] assert not any(pnl.SLOPE in p_name for p_name in comp.projections.names) - assert any(pnl.INTERCEPT in p_name for p_name in comp.projections.names) def test_hanging_control_spec_outer_controller(self): internal_mech = pnl.ProcessingMechanism( @@ -430,6 +558,113 @@ def test_hanging_control_spec_nearest_controller(self): assert result == [[5]] assert internal_mech.mod_afferents[0].sender.owner == inner_comp.controller + def test_state_input_ports_for_two_input_nodes(self): + # Inner Composition + ia = pnl.TransferMechanism(name='ia') + icomp = pnl.Composition(name='icomp', pathways=[ia]) + + # Outer Composition + oa = pnl.TransferMechanism(name='oa') + ob = pnl.TransferMechanism(name='ob') + oc = pnl.TransferMechanism(name='oc') + ctl_mech = pnl.ControlMechanism(name='ctl_mech', + control_signals=[pnl.ControlSignal(projections=[(pnl.SLOPE, ia)])]) + ocomp = pnl.Composition(name='ocomp', pathways=[[ob],[oa, icomp, oc, ctl_mech]]) + # ocomp.add_nodes(ob) + ocm = pnl.OptimizationControlMechanism(name='ocm', + agent_rep=ocomp, + control_signals=[ + pnl.ControlSignal(projections=[(pnl.NOISE, ia)]), + pnl.ControlSignal(projections=[(pnl.INTERCEPT, ia)]), + pnl.ControlSignal(projections=[(pnl.SLOPE, oa)]), + ], + search_space=[[1],[1],[1]]) + ocomp.add_controller(ocm) + result = ocomp.run({oa: [[1]], ob: [[2]]}) + assert result == [[2.], [1.]] + assert len(ocomp.controller.state_input_ports) == 2 + assert all([node in [input_port.shadow_inputs.owner for input_port in ocomp.controller.state_input_ports] + for node in {oa, ob}]) + + @pytest.mark.parametrize( + 'ocm_control_signals', + [ + 'None', + "[pnl.ControlSignal(modulates=('slope', a))]", + "[pnl.ControlSignal(modulates=('slope', a), allocation_samples=[1, 2])]", + ] + ) + @pytest.mark.parametrize('ocm_num_estimates', [None, 1, 2]) + @pytest.mark.parametrize( + 'slope, intercept', + [ + ((1.0, pnl.CONTROL), None), + ((1.0, pnl.CONTROL), (1.0, pnl.CONTROL)), + ] + ) + def test_transfer_mechanism_and_ocm_variations( + self, + slope, + intercept, + ocm_num_estimates, + ocm_control_signals, + ): + a = pnl.TransferMechanism( + name='a', + function=pnl.Linear( + slope=slope, + intercept=intercept, + ) + ) + + comp = pnl.Composition() + comp.add_node(a) + + ocm_control_signals = eval(ocm_control_signals) + + search_space_len = len( + set([ + # value of parameter name in 'modulates' kwarg + p[0] + for cs in ocm_control_signals + for p in cs._init_args['projections'] + ]) if ocm_control_signals is not None else set() + .union({'slope'} if slope is not None else set()) + .union({'intercept'} if intercept is not None else set()) + ) + search_space = [[0, 1]] * search_space_len + + ocm = pnl.OptimizationControlMechanism( + agent_rep=comp, + search_space=search_space, + num_estimates=ocm_num_estimates, + control_signals=ocm_control_signals, + ) + 
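One parametrization of this test, distilled into a standalone sketch (behavior assumed from the test's own assertions): specifying a mechanism parameter with a `(value, pnl.CONTROL)` tuple should be enough for the OCM to create a matching ControlSignal, with one search-space dimension per controlled parameter.

```python
import psyneulink as pnl

a = pnl.TransferMechanism(name='a',
                          function=pnl.Linear(slope=(1.0, pnl.CONTROL)))  # tuple == control spec

comp = pnl.Composition()
comp.add_node(a)

# one controlled parameter ('slope') => a search space with a single dimension
ocm = pnl.OptimizationControlMechanism(agent_rep=comp, search_space=[[0, 1]])
comp.add_controller(ocm)

assert 'a[slope] ControlSignal' in ocm.control.names
```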
comp.add_controller(ocm) + + # assume tuple is a control spec + if ( + isinstance(slope, tuple) + or ( + ocm_control_signals is not None + and any(cs.name == 'slope' for cs in ocm_control_signals) + ) + ): + assert 'a[slope] ControlSignal' in ocm.control.names + else: + assert 'a[slope] ControlSignal' not in ocm.control.names + + if ( + isinstance(intercept, tuple) + or ( + ocm_control_signals is not None + and any(cs.name == 'intercept' for cs in ocm_control_signals) + ) + ): + assert 'a[intercept] ControlSignal' in ocm.control.names + else: + assert 'a[intercept] ControlSignal' not in ocm.control.names + class TestControlMechanisms: def test_modulation_of_control_signal_intensity_cost_function_MULTIPLICATIVE(self): @@ -455,7 +690,7 @@ def test_modulation_of_control_signal_intensity_cost_function_MULTIPLICATIVE(sel def test_feedback_assignment_for_multiple_control_projections_to_same_mechanism(self): """Test that multiple ControlProjections from a ControlMechanism to the same Mechanism are treated - same as a single Controlprojection to that Mechanism. + same as a single ControlProjection to that Mechanism. Note: Even though both mech and control_mech don't receive pathway inputs, since control_mech projects to mech, control_mech is assigned as NodeRole.INPUT (can be overridden with assignments in add_nodes) """ @@ -555,7 +790,7 @@ def test_lvoc_features_function(self): c._analyze_graph() lvoc = pnl.OptimizationControlMechanism(agent_rep=pnl.RegressionCFA, state_features=[m1.input_ports[0], m1.input_ports[1], m2.input_port, m2], - state_feature_function=pnl.LinearCombination(offset=10.0), + state_feature_functions=pnl.LinearCombination(offset=10.0), objective_mechanism=pnl.ObjectiveMechanism( monitor=[m1, m2]), function=pnl.GradientOptimization(max_iterations=1), @@ -568,7 +803,7 @@ def test_lvoc_features_function(self): assert len(lvoc.input_ports) == 5 for i in range(1,5): - assert lvoc.input_ports[i].function.offset.base == 10.0 + assert lvoc.input_ports[i].function.offset == 10.0 @pytest.mark.control @pytest.mark.composition @@ -597,7 +832,7 @@ def test_multilevel_ocm_gridsearch_conflicting_directions(self, mode, benchmark) pnl.OptimizationControlMechanism( agent_rep=ocomp, state_features=[oa.input_port], - # state_feature_function=pnl.Buffer(history=2), + # state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=pnl.ObjectiveMechanism( monitor=ib.output_port, @@ -614,7 +849,7 @@ def test_multilevel_ocm_gridsearch_conflicting_directions(self, mode, benchmark) pnl.OptimizationControlMechanism( agent_rep=icomp, state_features=[ia.input_port], - # state_feature_function=pnl.Buffer(history=2), + # state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=pnl.ObjectiveMechanism( monitor=ib.output_port, @@ -660,7 +895,7 @@ def test_multilevel_ocm_gridsearch_maximize(self, mode, benchmark): pnl.OptimizationControlMechanism( agent_rep=ocomp, state_features=[oa.input_port], - # state_feature_function=pnl.Buffer(history=2), + # state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=pnl.ObjectiveMechanism( monitor=ib.output_port, @@ -679,7 +914,7 @@ def test_multilevel_ocm_gridsearch_maximize(self, mode, benchmark): pnl.OptimizationControlMechanism( agent_rep=icomp, state_features=[ia.input_port], - # state_feature_function=pnl.Buffer(history=2), + # state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=pnl.ObjectiveMechanism( monitor=ib.output_port, @@ -727,8 +962,8 @@ def 
test_multilevel_ocm_gridsearch_minimize(self, mode, benchmark): pnl.OptimizationControlMechanism( agent_rep=ocomp, state_features=[oa.input_port], - # state_feature_function=pnl.Buffer(history=2), - name="Controller", + # state_feature_functions=pnl.Buffer(history=2), + name="oController", objective_mechanism=pnl.ObjectiveMechanism( monitor=ib.output_port, function=pnl.SimpleIntegrator, @@ -746,12 +981,12 @@ def test_multilevel_ocm_gridsearch_minimize(self, mode, benchmark): pnl.OptimizationControlMechanism( agent_rep=icomp, state_features=[ia.input_port], - # state_feature_function=pnl.Buffer(history=2), - name="Controller", + # state_feature_functions=pnl.Buffer(history=2), + name="iController", objective_mechanism=pnl.ObjectiveMechanism( monitor=ib.output_port, function=pnl.SimpleIntegrator, - name="oController Objective Mechanism" + name="iController Objective Mechanism" ), function=pnl.GridSearch(direction=pnl.MINIMIZE), control_signals=[pnl.ControlSignal(projections=[(pnl.SLOPE, ia)], @@ -885,7 +1120,7 @@ def test_two_tier_ocm(self): pnl.OptimizationControlMechanism(agent_rep=stabilityFlexibility, state_features=[taskLayer.input_port, stimulusInfo.input_port], - state_feature_function=pnl.Buffer(history=2), + state_feature_functions=pnl.Buffer(history=2), name="Controller", objective_mechanism=pnl.ObjectiveMechanism( monitor=[(pnl.PROBABILITY_UPPER_THRESHOLD, @@ -911,7 +1146,7 @@ def test_two_tier_ocm(self): outerComposition.add_controller( pnl.OptimizationControlMechanism(agent_rep=stabilityFlexibility, state_features=[taskLayer.input_port, stimulusInfo.input_port], - state_feature_function=pnl.Buffer(history=2), + state_feature_functions=pnl.Buffer(history=2), name="OuterController", objective_mechanism=pnl.ObjectiveMechanism( monitor=[(pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker)], @@ -1050,49 +1285,158 @@ def test_control_of_mech_port(self, comp_mode): @pytest.mark.control @pytest.mark.composition - def test_modulation_of_random_state(self): - src = pnl.ProcessingMechanism() - mech = pnl.ProcessingMechanism(function=pnl.UniformDist()) + @pytest.mark.parametrize("cost, expected, exp_values", [ + (pnl.CostFunctions.NONE, 7.0, [3, 4, 5, 6, 7]), + (pnl.CostFunctions.INTENSITY, 3, [0.2817181715409549, -3.3890560989306495, -15.085536923187664, -48.59815003314423, -141.41315910257657]), + (pnl.CostFunctions.ADJUSTMENT, 3, [3, 3, 3, 3, 3] ), + (pnl.CostFunctions.INTENSITY | pnl.CostFunctions.ADJUSTMENT, 3, [0.2817181715409549, -4.389056098930649, -17.085536923187664, -51.59815003314423, -145.41315910257657]), + (pnl.CostFunctions.DURATION, 3, [-17, -20, -23, -26, -29]), + # FIXME: combinations with DURATION are broken + # (pnl.CostFunctions.DURATION | pnl.CostFunctions.ADJUSTMENT, ,), + # (pnl.CostFunctions.ALL, ,), + pytest.param(pnl.CostFunctions.DEFAULTS, 7, [3, 4, 5, 6, 7], id="CostFunctions.DEFAULT")], + ids=lambda x: x if isinstance(x, pnl.CostFunctions) else "") + def test_modulation_simple(self, cost, expected, exp_values, comp_mode): + if comp_mode != pnl.ExecutionMode.Python and cost not in {pnl.CostFunctions.NONE, pnl.CostFunctions.INTENSITY}: + pytest.skip("Not implemented!") + + obj = pnl.ObjectiveMechanism() + mech = pnl.ProcessingMechanism() - comp = pnl.Composition(retain_old_simulation_data=True) + comp = pnl.Composition(controller_mode=pnl.BEFORE) comp.add_node(mech, required_roles=pnl.NodeRole.INPUT) - comp.add_node(src) + comp.add_linear_processing_pathway([mech, obj]) comp.add_controller( pnl.OptimizationControlMechanism( - monitor_for_control=src, + 
objective_mechanism=obj, + state_features=[mech.input_port], control_signals=pnl.ControlSignal( - modulates=('seed', mech), + modulates=('intercept', mech), modulation=pnl.OVERRIDE, - allocation_samples=pnl.SampleSpec(start=0, stop=5, step=1), + allocation_samples=pnl.SampleSpec(start=1, stop=5, step=1), + cost_options=cost, ) ) ) - def seed_check(context): - latest_sim = comp.controller.parameters.simulation_ids._get(context)[-1] + ret = comp.run(inputs={mech: [2]}, num_trials=1, execution_mode=comp_mode) + assert np.allclose(ret, expected) + if comp_mode == pnl.ExecutionMode.Python: + assert np.allclose([float(x) for x in comp.controller.function.saved_values], exp_values) + + @pytest.mark.benchmark + @pytest.mark.control + @pytest.mark.composition + @pytest.mark.parametrize('prng', ['Default', 'Philox']) + def test_modulation_of_random_state_direct(self, comp_mode, benchmark, prng): + # set explicit seed to make sure modulation is different + mech = pnl.ProcessingMechanism(function=pnl.UniformDist(seed=0)) + if prng == 'Philox': + mech.function.parameters.random_state.set(_SeededPhilox([0])) + ctl_mech = pnl.ControlMechanism(control_signals=pnl.ControlSignal(modulates=('seed', mech), + modulation=pnl.OVERRIDE)) + comp = pnl.Composition() + comp.add_node(mech) + comp.add_node(ctl_mech) - seed = mech.get_mod_seed(latest_sim) - rs = mech.function.parameters.random_state.get(latest_sim) + seeds = [13, 13, 14] + # cycle over the seeds twice setting and resetting the random state + benchmark(comp.run, inputs={ctl_mech:seeds}, num_trials=len(seeds) * 2, execution_mode=comp_mode) - # mech (and so its function and random_state) should be called - # exactly twice in one trial given the specified condition, - # and the random_state should be reset before the first execution - new_rs = np.random.RandomState([int(seed)]) + if prng == 'Default': + prngs = {s:np.random.RandomState([s]) for s in seeds} + elif prng == 'Philox': + prngs = {s:_SeededPhilox([s]) for s in seeds} - for i in range(2): - new_rs.uniform(0, 1) + expected = [prngs[s].uniform() for s in seeds] * 2 + assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), expected) - assert rs.uniform(0, 1) == new_rs.uniform(0, 1) + @pytest.mark.benchmark + @pytest.mark.control + @pytest.mark.composition + # 'LLVM' mode is not supported, because synchronization of compiler and + # python values during execution is not implemented. 
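`test_modulation_of_random_state_direct` above drives a mechanism's random seed from a ControlMechanism using OVERRIDE modulation. A minimal sketch of that pattern follows (assumed from the test; distinct seeds are used here so each trial simply restarts its generator, sidestepping the repeated-seed subtlety the test also covers):

```python
import numpy as np
import psyneulink as pnl

mech = pnl.ProcessingMechanism(function=pnl.UniformDist(seed=0))
ctl = pnl.ControlMechanism(control_signals=pnl.ControlSignal(modulates=('seed', mech),
                                                             modulation=pnl.OVERRIDE))
comp = pnl.Composition()
comp.add_node(mech)
comp.add_node(ctl)

seeds = [13, 14, 15]                      # one (distinct) seed per trial
comp.run(inputs={ctl: seeds}, num_trials=len(seeds))

# With OVERRIDE modulation, each trial's draw should match a fresh generator
# seeded with that trial's input (per the assertions in the test above):
expected = [np.random.RandomState([s]).uniform() for s in seeds]
```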
+ @pytest.mark.usefixtures("comp_mode_no_llvm") + @pytest.mark.parametrize('prng', ['Default', 'Philox']) + def test_modulation_of_random_state_DDM(self, comp_mode, benchmark, prng): + # set explicit seed to make sure modulation is different + mech = pnl.DDM(function=pnl.DriftDiffusionIntegrator(noise=5.), + reset_stateful_function_when=pnl.AtPass(0), + execute_until_finished=True) + if prng == 'Philox': + mech.function.parameters.random_state.set(_SeededPhilox([0])) + ctl_mech = pnl.ControlMechanism(control_signals=pnl.ControlSignal(modulates=('seed-function', mech), + modulation=pnl.OVERRIDE)) + comp = pnl.Composition() + comp.add_node(mech, required_roles=pnl.NodeRole.INPUT) + comp.add_node(ctl_mech) - comp.termination_processing = {pnl.TimeScale.TRIAL: pnl.AfterNCalls(mech, 2)} - comp.run( - inputs={src: [1], mech: [1]}, - call_after_trial=seed_check - ) + seeds = [13, 13, 14] + # cycle over the seeds twice setting and resetting the random state + benchmark(comp.run, inputs={ctl_mech:seeds, mech:5.0}, num_trials=len(seeds) * 2, execution_mode=comp_mode) + + if prng == 'Default': + assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[100, 21], [100, 23], [100, 20]] * 2) + elif prng == 'Philox': + assert np.allclose(np.squeeze(comp.results[:len(seeds) * 2]), [[100, 19], [100, 21], [100, 21]] * 2) + + @pytest.mark.control + @pytest.mark.composition + @pytest.mark.parametrize("num_generators", [5]) + def test_modulation_of_random_state(self, comp_mode, num_generators): + obj = pnl.ObjectiveMechanism() + # Set original seed that is not used by any evaluation + # this prevents dirty state from initialization skewing the results. + # The alternative would be to set: + # mech.functions.seed.base = mech.functions.seed.base + # to reset the PRNG + mech = pnl.ProcessingMechanism(function=pnl.UniformDist(seed=num_generators)) + + comp = pnl.Composition(retain_old_simulation_data=True, + controller_mode=pnl.BEFORE) + comp.add_node(mech, required_roles=pnl.NodeRole.INPUT) + comp.add_linear_processing_pathway([mech, obj]) + comp.add_controller( + pnl.OptimizationControlMechanism( + state_features=[mech.input_port], + objective_mechanism=obj, + control_signals=pnl.ControlSignal( + modulates=('seed', mech), + modulation=pnl.OVERRIDE, + allocation_samples=pnl.SampleSpec(start=0, stop=num_generators - 1, step=1), + # FIX: 11/3/21 DELETE: [NOT NEEDED ANYMORE] + cost_options=pnl.CostFunctions.NONE + ) + ) + ) -class TestModelBasedOptimizationControlMechanisms: + comp.run(inputs={mech: [1]}, + num_trials=2, + report_output=pnl.ReportOutput.FULL, + report_params=pnl.ReportParams.MONITORED, + execution_mode=comp_mode) + + # Construct expected results. + # First all generators rest their sequence. 
+ # In the second trial, the "winning" seed from the previous one continues its + # random sequence + all_generators = [np.random.RandomState([seed]) for seed in range(num_generators)] + first_generator_samples = [g.uniform(0, 1) for g in all_generators] + best_first = max(first_generator_samples) + index_best = first_generator_samples.index(best_first) + second_generator_samples = [g.uniform(0, 1) for g in all_generators] + second_considerations = first_generator_samples[:index_best] + \ + second_generator_samples[index_best:index_best + 1] + \ + first_generator_samples[index_best + 1:] + best_second = max(second_considerations) + # Check that we select the maximum of generated values + assert np.allclose(best_first, comp.results[0]) + assert np.allclose(best_second, comp.results[1]) + + +class TestModelBasedOptimizationControlMechanisms_Execution: def test_ocm_default_function(self): a = pnl.ProcessingMechanism() comp = pnl.Composition( @@ -1205,7 +1549,7 @@ def test_evc(self): comp.add_controller(controller=pnl.OptimizationControlMechanism( agent_rep=comp, state_features=[Input.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), objective_mechanism=pnl.ObjectiveMechanism( function=pnl.LinearCombination(operation=pnl.PRODUCT), monitor=[reward, @@ -1344,7 +1688,7 @@ def test_evc_gratton(self): state_features=[target_stim.input_port, flanker_stim.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator( + state_feature_functions=pnl.AdaptiveIntegrator( rate=1.0), objective_mechanism=objective_mech, function=pnl.GridSearch(), @@ -1489,7 +1833,7 @@ def test_laming_validation_specify_control_signals(self): controller=pnl.OptimizationControlMechanism( agent_rep=comp, state_features=[Input.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), objective_mechanism=pnl.ObjectiveMechanism( function=pnl.LinearCombination(operation=pnl.PRODUCT), monitor=[ @@ -1627,7 +1971,7 @@ def test_stateful_mechanism_in_simulation(self): controller=pnl.OptimizationControlMechanism( agent_rep=comp, state_features=[Input.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), objective_mechanism=pnl.ObjectiveMechanism( function=pnl.LinearCombination(operation=pnl.PRODUCT), monitor=[ @@ -1740,6 +2084,7 @@ def test_model_based_ocm_after(self, benchmark, mode): control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, A)], variable=1.0, allocation_samples=search_range, + cost_options=pnl.CostFunctions.INTENSITY, intensity_cost_function=pnl.Linear(slope=0.)) objective_mech = pnl.ObjectiveMechanism(monitor=[B]) @@ -1759,6 +2104,8 @@ def test_model_based_ocm_after(self, benchmark, mode): # objective_mech.log.print_entries(pnl.OUTCOME) assert np.allclose(comp.results, [[np.array([1.])], [np.array([1.5])], [np.array([2.25])]]) + if mode == pnl.ExecutionMode.Python: + assert np.allclose(np.asfarray(ocm.function.saved_values).flatten(), [0.75, 1.5, 2.25]) if benchmark.enabled: benchmark(comp.run, inputs, execution_mode=mode) @@ -1788,6 +2135,7 @@ def test_model_based_ocm_before(self, benchmark, mode): control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, A)], variable=1.0, allocation_samples=search_range, + cost_options=pnl.CostFunctions.INTENSITY, intensity_cost_function=pnl.Linear(slope=0.)) objective_mech 
= pnl.ObjectiveMechanism(monitor=[B]) @@ -1807,6 +2155,8 @@ def test_model_based_ocm_before(self, benchmark, mode): # objective_mech.log.print_entries(pnl.OUTCOME) assert np.allclose(comp.results, [[np.array([0.75])], [np.array([1.5])], [np.array([2.25])]]) + if mode == pnl.ExecutionMode.Python: + assert np.allclose(np.asfarray(ocm.function.saved_values).flatten(), [0.75, 1.5, 2.25]) if benchmark.enabled: benchmark(comp.run, inputs, execution_mode=mode) @@ -1831,7 +2181,7 @@ def test_model_based_ocm_with_buffer(self): objective_mech = pnl.ObjectiveMechanism(monitor=[B]) ocm = pnl.OptimizationControlMechanism(agent_rep=comp, state_features=[A.input_port], - state_feature_function=pnl.Buffer(history=2), + state_feature_functions=pnl.Buffer(history=2), objective_mechanism=objective_mech, function=pnl.GridSearch(), control_signals=[control_signal]) @@ -2031,7 +2381,7 @@ def computeAccuracy(trialInformation): # Sets trial history for simulations over specified signal search parameters metaController = pnl.OptimizationControlMechanism(agent_rep=stabilityFlexibility, state_features=[taskLayer.input_port, stimulusInfo.input_port], - state_feature_function=pnl.Buffer(history=10), + state_feature_functions=pnl.Buffer(history=10), name="Controller", objective_mechanism=objectiveMechanism, function=pnl.GridSearch(), @@ -2050,7 +2400,8 @@ def computeAccuracy(trialInformation): inputs = {taskLayer: taskTrain, stimulusInfo: stimulusTrain} stabilityFlexibility.run(inputs) - def test_model_based_num_estimates(self): + @pytest.mark.parametrize('num_estimates',[None, 1] ) + def test_model_based_num_estimates(self, num_estimates): A = pnl.ProcessingMechanism(name='A') B = pnl.ProcessingMechanism(name='B', @@ -2070,7 +2421,7 @@ def test_model_based_num_estimates(self): state_features=[A.input_port], objective_mechanism=objective_mech, function=pnl.GridSearch(), - num_estimates=5, + num_estimates=num_estimates, control_signals=[control_signal]) comp.add_controller(ocm) @@ -2080,6 +2431,10 @@ def test_model_based_num_estimates(self): comp.run(inputs=inputs, num_trials=2) + if num_estimates is None: + assert pnl.RANDOMIZATION_CONTROL_SIGNAL not in comp.controller.control_signals # Confirm no estimates + elif num_estimates==1: + assert comp.controller.control_signals[pnl.RANDOMIZATION_CONTROL_SIGNAL].efferents == []# Confirm no noise assert np.allclose(comp.simulation_results, [[np.array([2.25])], [np.array([3.5])], [np.array([4.75])], [np.array([3.])], [np.array([4.25])], [np.array([5.5])]]) assert np.allclose(comp.results, @@ -2136,6 +2491,7 @@ def test_grid_search_random_selection(self, comp_mode, benchmark): control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, A)], variable=1.0, allocation_samples=search_range, + cost_options=pnl.CostFunctions.INTENSITY, intensity_cost_function=pnl.Linear(slope=0.)) objective_mech = pnl.ObjectiveMechanism(monitor=[B]) @@ -2191,9 +2547,11 @@ def test_input_CIM_assignment(self, comp_mode): control_signals=[ pnl.ControlSignal(modulates=[(pnl.SLOPE, input_a)], intensity_cost_function=pnl.Linear(slope=1), + cost_options=pnl.CostFunctions.INTENSITY, allocation_samples=[-1, 1]), pnl.ControlSignal(modulates=[(pnl.SLOPE, input_b)], intensity_cost_function=pnl.Linear(slope=0), + cost_options=pnl.CostFunctions.INTENSITY, allocation_samples=[-1, 1]) ])) results = comp.run(inputs={input_a: [[5]], input_b: [[-2]]}, diff --git a/tests/composition/test_interfaces.py b/tests/composition/test_interfaces.py index 7ff5aee7345..1c2659a667d 100644 --- a/tests/composition/test_interfaces.py 
+++ b/tests/composition/test_interfaces.py @@ -295,9 +295,11 @@ def test_compositions_as_origin_nodes(self, comp_mode): outer_composition.add_node(inner_composition_2) outer_composition.add_node(mechanism_d) - outer_composition.add_projection(projection=MappingProjection(), sender=inner_composition_1, + outer_composition.add_projection(projection=MappingProjection(), + sender=inner_composition_1, receiver=mechanism_d) - outer_composition.add_projection(projection=MappingProjection(), sender=inner_composition_2, + outer_composition.add_projection(projection=MappingProjection(), + sender=inner_composition_2, receiver=mechanism_d) sched = Scheduler(composition=outer_composition) diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index 90c47f41af3..de7dae19478 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -495,39 +495,39 @@ def test_simple_hebbian(self): class TestReinforcement: def test_rl(self): - input_layer = pnl.TransferMechanism(size=2, - name='Input Layer') - input_layer.log.set_log_conditions(items=pnl.VALUE) - action_selection = pnl.DDM(input_format=pnl.ARRAY, - function=pnl.DriftDiffusionAnalytical(), - output_ports=[pnl.SELECTED_INPUT_ARRAY], - name='DDM') - action_selection.log.set_log_conditions(items=pnl.SELECTED_INPUT_ARRAY) - - comp = pnl.Composition(name='comp') - learning_pathway = comp.add_reinforcement_learning_pathway(pathway=[input_layer, action_selection], - learning_rate=0.05) - learned_projection = learning_pathway.learning_components[pnl.LEARNED_PROJECTIONS] - learning_mechanism = learning_pathway.learning_components[pnl.LEARNING_MECHANISMS] - target_mechanism = learning_pathway.target - comparator_mechanism = learning_pathway.learning_objective - - learned_projection.log.set_log_conditions(items=["matrix", "mod_matrix"]) - - inputs_dict = {input_layer: [[1., 1.], [1., 1.]], - target_mechanism: [[10.], [10.]] - } - learning_mechanism.log.set_log_conditions(items=[pnl.VALUE]) - comparator_mechanism.log.set_log_conditions(items=[pnl.VALUE]) - - target_mechanism.log.set_log_conditions(items=pnl.VALUE) - comp.learn(inputs=inputs_dict) - - - assert np.allclose(learning_mechanism.value, [np.array([0.4275, 0.]), np.array([0.4275, 0.])]) - assert np.allclose(action_selection.value, [[1.], [2.30401336], [0.97340301], [0.02659699], [2.30401336], - [2.08614798], [1.85006765], [2.30401336], [2.08614798], - [1.85006765]]) + input_layer = pnl.TransferMechanism(size=2, + name='Input Layer') + input_layer.log.set_log_conditions(items=pnl.VALUE) + action_selection = pnl.DDM(input_format=pnl.ARRAY, + function=pnl.DriftDiffusionAnalytical(), + output_ports=[pnl.SELECTED_INPUT_ARRAY], + name='DDM') + action_selection.log.set_log_conditions(items=pnl.SELECTED_INPUT_ARRAY) + + comp = pnl.Composition(name='comp') + learning_pathway = comp.add_reinforcement_learning_pathway(pathway=[input_layer, action_selection], + learning_rate=0.05) + learned_projection = learning_pathway.learning_components[pnl.LEARNED_PROJECTIONS] + learning_mechanism = learning_pathway.learning_components[pnl.LEARNING_MECHANISMS] + target_mechanism = learning_pathway.target + comparator_mechanism = learning_pathway.learning_objective + + learned_projection.log.set_log_conditions(items=["matrix", "mod_matrix"]) + + inputs_dict = {input_layer: [[1., 1.], [1., 1.]], + target_mechanism: [[10.], [10.]] + } + learning_mechanism.log.set_log_conditions(items=[pnl.VALUE]) + comparator_mechanism.log.set_log_conditions(items=[pnl.VALUE]) + + 
target_mechanism.log.set_log_conditions(items=pnl.VALUE) + comp.learn(inputs=inputs_dict) + + + assert np.allclose(learning_mechanism.value, [np.array([0.4275, 0.]), np.array([0.4275, 0.])]) + assert np.allclose(action_selection.value, [[1.], [2.30401336], [0.97340301], [0.02659699], [2.30401336], + [2.08614798], [1.85006765], [2.30401336], [2.08614798], + [1.85006765]]) def test_reinforcement_fixed_targets(self): input_layer = pnl.TransferMechanism(size=2, @@ -1326,56 +1326,56 @@ def test_prediction_error_delta_first_run(self): err_msg="mismatch on timestep {}".format(i)) def test_rl_enable_learning_false(self): - input_layer = pnl.TransferMechanism(size=2, - name='Input Layer') - input_layer.log.set_log_conditions(items=pnl.VALUE) - action_selection = pnl.DDM(input_format=pnl.ARRAY, - function=pnl.DriftDiffusionAnalytical(), - output_ports=[pnl.SELECTED_INPUT_ARRAY], - name='DDM') - action_selection.log.set_log_conditions(items=pnl.SELECTED_INPUT_ARRAY) - - comp = pnl.Composition(name='comp') - learning_pathway = comp.add_reinforcement_learning_pathway(pathway=[input_layer, action_selection], - learning_rate=0.05) - learned_projection = learning_pathway.learning_components[pnl.LEARNED_PROJECTIONS] - learning_mechanism = learning_pathway.learning_components[pnl.LEARNING_MECHANISMS] - target_mechanism = learning_pathway.learning_components[pnl.TARGET_MECHANISM] - comparator_mechanism = learning_pathway.learning_components[pnl.OBJECTIVE_MECHANISM] - - learned_projection.log.set_log_conditions(items=["matrix", "mod_matrix"]) - - inputs_dict = {input_layer: [[1., 1.], [1., 1.]], - target_mechanism: [[10.], [10.]] - } - learning_mechanism.log.set_log_conditions(items=[pnl.VALUE]) - comparator_mechanism.log.set_log_conditions(items=[pnl.VALUE]) - - target_mechanism.log.set_log_conditions(items=pnl.VALUE) - comp.learn(inputs=inputs_dict) - - - assert np.allclose(learning_mechanism.value, [np.array([0.4275, 0.]), np.array([0.4275, 0.])]) - assert np.allclose(action_selection.value, [[1.], [2.30401336], [0.97340301], [0.02659699], [2.30401336], - [2.08614798], [1.85006765], [2.30401336], [2.08614798], - [1.85006765]]) - - # Pause learning -- values are the same as the previous trial (because we pass in the same inputs) - inputs_dict = {input_layer: [[1., 1.], [1., 1.]]} - comp.run(inputs=inputs_dict) - assert np.allclose(learning_mechanism.value, [np.array([0.4275, 0.]), np.array([0.4275, 0.])]) - assert np.allclose(action_selection.value, [[1.], [2.30401336], [0.97340301], [0.02659699], [2.30401336], - [2.08614798], [1.85006765], [2.30401336], [2.08614798], - [1.85006765]]) - - # Resume learning - inputs_dict = {input_layer: [[1., 1.], [1., 1.]], - target_mechanism: [[10.], [10.]]} - comp.learn(inputs=inputs_dict) - assert np.allclose(learning_mechanism.value, [np.array([0.38581875, 0.]), np.array([0.38581875, 0.])]) - assert np.allclose(action_selection.value, [[1.], [0.978989672], [0.99996], [0.0000346908466], [0.978989672], - [0.118109771], [1.32123733], [0.978989672], [0.118109771], - [1.32123733]]) + input_layer = pnl.TransferMechanism(size=2, + name='Input Layer') + input_layer.log.set_log_conditions(items=pnl.VALUE) + action_selection = pnl.DDM(input_format=pnl.ARRAY, + function=pnl.DriftDiffusionAnalytical(), + output_ports=[pnl.SELECTED_INPUT_ARRAY], + name='DDM') + action_selection.log.set_log_conditions(items=pnl.SELECTED_INPUT_ARRAY) + + comp = pnl.Composition(name='comp') + learning_pathway = comp.add_reinforcement_learning_pathway(pathway=[input_layer, action_selection], + 
learning_rate=0.05) + learned_projection = learning_pathway.learning_components[pnl.LEARNED_PROJECTIONS] + learning_mechanism = learning_pathway.learning_components[pnl.LEARNING_MECHANISMS] + target_mechanism = learning_pathway.learning_components[pnl.TARGET_MECHANISM] + comparator_mechanism = learning_pathway.learning_components[pnl.OBJECTIVE_MECHANISM] + + learned_projection.log.set_log_conditions(items=["matrix", "mod_matrix"]) + + inputs_dict = {input_layer: [[1., 1.], [1., 1.]], + target_mechanism: [[10.], [10.]] + } + learning_mechanism.log.set_log_conditions(items=[pnl.VALUE]) + comparator_mechanism.log.set_log_conditions(items=[pnl.VALUE]) + + target_mechanism.log.set_log_conditions(items=pnl.VALUE) + comp.learn(inputs=inputs_dict) + + + assert np.allclose(learning_mechanism.value, [np.array([0.4275, 0.]), np.array([0.4275, 0.])]) + assert np.allclose(action_selection.value, [[1.], [2.30401336], [0.97340301], [0.02659699], [2.30401336], + [2.08614798], [1.85006765], [2.30401336], [2.08614798], + [1.85006765]]) + + # Pause learning -- values are the same as the previous trial (because we pass in the same inputs) + inputs_dict = {input_layer: [[1., 1.], [1., 1.]]} + comp.run(inputs=inputs_dict) + assert np.allclose(learning_mechanism.value, [np.array([0.4275, 0.]), np.array([0.4275, 0.])]) + assert np.allclose(action_selection.value, [[1.], [2.30401336], [0.97340301], [0.02659699], [2.30401336], + [2.08614798], [1.85006765], [2.30401336], [2.08614798], + [1.85006765]]) + + # Resume learning + inputs_dict = {input_layer: [[1., 1.], [1., 1.]], + target_mechanism: [[10.], [10.]]} + comp.learn(inputs=inputs_dict) + assert np.allclose(learning_mechanism.value, [np.array([0.38581875, 0.]), np.array([0.38581875, 0.])]) + assert np.allclose(action_selection.value, [[1.], [0.978989672], [0.99996], [0.0000346908466], [0.978989672], + [0.118109771], [1.32123733], [0.978989672], [0.118109771], + [1.32123733]]) def test_td_enabled_learning_false(self): @@ -2032,7 +2032,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[A,D,E]) comp.add_backpropagation_learning_pathway(pathway=[A,B,C]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {A}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {E,C}) @@ -2057,7 +2057,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[D,B,E]) comp.add_backpropagation_learning_pathway(pathway=[A,B,C]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {A,D}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {C}) @@ -2084,7 +2084,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[D,E,A]) comp.add_backpropagation_learning_pathway(pathway=[A,B,C]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {D}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {C}) @@ -2107,7 +2107,7 @@ def 
test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[A,D]) comp.add_backpropagation_learning_pathway(pathway=[B,A,C]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {B}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {D, C}) @@ -2130,7 +2130,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[D,A,E]) comp.add_backpropagation_learning_pathway(pathway=[B,A,C]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {D,B}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {E,C}) @@ -2153,7 +2153,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[D,A]) comp.add_backpropagation_learning_pathway(pathway=[B,A,C]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {D,B}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {C}) @@ -2176,7 +2176,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[C,A,D]) comp.add_backpropagation_learning_pathway(pathway=[B,A]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {C,B}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {D}) @@ -2199,7 +2199,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[D,E,A]) comp.add_backpropagation_learning_pathway(pathway=[C,B,A]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {D,C}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {A}) @@ -2227,7 +2227,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp = pnl.Composition(name=configuration) comp.add_backpropagation_learning_pathway(pathway=[E,A,B,C]) comp.add_backpropagation_learning_pathway(pathway=[A,D,C,F]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {E}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {F}) @@ -2258,7 +2258,7 @@ def test_backprop_with_various_intersecting_pathway_configurations(self, configu comp.add_backpropagation_learning_pathway(pathway=[B,A,C]) comp.add_backpropagation_learning_pathway(pathway=[E,B,F]) comp.add_backpropagation_learning_pathway(pathway=[H,D,G,I]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {E,H}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {C,I}) @@ -2283,7 +2283,7 @@ def 
test_backprop_with_various_intersecting_pathway_configurations(self, configu comp.add_backpropagation_learning_pathway(pathway=[D,E,A]) comp.add_backpropagation_learning_pathway(pathway=[C,B,F]) comp.add_backpropagation_learning_pathway(pathway=[B,A]) - if show_graph == True: + if show_graph is True: comp.show_graph(show_learning=True) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.INPUT) for n in {D,C}) assert all(n in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT) for n in {A,F}) diff --git a/tests/composition/test_models.py b/tests/composition/test_models.py index ae33659953d..3db9c9c2092 100644 --- a/tests/composition/test_models.py +++ b/tests/composition/test_models.py @@ -256,7 +256,7 @@ def switch_integrator_mode(mechanisms, mode): def switch_noise(mechanisms, noise): for mechanism in mechanisms: - mechanism.noise.base = noise + mechanism.noise = noise def switch_to_initialization_trial(mechanisms): # Turn off accumulation diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py new file mode 100644 index 00000000000..43eec9cf638 --- /dev/null +++ b/tests/composition/test_parameterestimationcomposition.py @@ -0,0 +1,115 @@ +import logging + +import numpy as np +import pytest + +import psyneulink as pnl +from psyneulink.core.components.functions.nonstateful.combinationfunctions import \ + LinearCombination, Concatenate +from psyneulink.core.components.functions.nonstateful.distributionfunctions import DriftDiffusionAnalytical +from psyneulink.core.components.functions.nonstateful.optimizationfunctions import GridSearch +from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection +from psyneulink.library.components.mechanisms.processing.integrator.ddm import \ + DDM, DECISION_VARIABLE, RESPONSE_TIME, PROBABILITY_UPPER_THRESHOLD + +logger = logging.getLogger(__name__) + + +# All tests are set to run. If you need to skip certain tests, +# see http://doc.pytest.org/en/latest/skipping.html + +# Unit tests for ParameterEstimationComposition + +# objective_function = {None: 2, Concatenate: 2, LinearCombination: 1} +# expected + +pec_test_args = [(None, 2, True, False), # No ObjectiveMechanism (2 inputs), model arg + (None, 2, False, True), # No ObjectiveMechanism (2 inputs), nodes arg + (Concatenate, 2, True, False), # ObjectiveMechanism (2 inputs), model arg + (LinearCombination, 1, True, False), # ObjectiveMechanism (1 input), model arg + # (None, 2, True, True), <- USE TO TEST ERROR + # (None, 2, False, False), <- USE TO TEST ERROR + ] + +@pytest.mark.parametrize( + 'objective_function_arg, expected_input_len, model_spec, node_spec', + pec_test_args, + ids=[f"{x[0]}-{'model' if x[2] else None}-{'nodes' if x[3] else None})" for x in pec_test_args] +) +def test_parameter_estimation_composition(objective_function_arg, expected_input_len, model_spec, node_spec): + """Test with and without ObjectiveMechanism specified, and use of model vs. 
nodes arg of PEC constructor""" + samples = np.arange(0.1, 1.01, 0.3) + Input = pnl.TransferMechanism(name='Input') + reward = pnl.TransferMechanism(output_ports=[pnl.RESULT, pnl.MEAN, pnl.VARIANCE], + name='reward', + # integrator_mode=True, + # noise=NormalDist # <- FIX 11/3/31: TEST ALLOCATION OF SEED FOR THIS WHEN WORKING + ) + Decision = DDM(function=DriftDiffusionAnalytical(drift_rate=(1.0, + ControlProjection(function=pnl.Linear, + control_signal_params={ + pnl.ALLOCATION_SAMPLES: samples, + })), + threshold=(1.0, + ControlProjection(function=pnl.Linear, + control_signal_params={ + pnl.ALLOCATION_SAMPLES: samples, + })), + noise=0.5, + starting_point=0, + t0=0.45), + output_ports=[DECISION_VARIABLE, + RESPONSE_TIME, + PROBABILITY_UPPER_THRESHOLD], + name='Decision1') + Decision2 = DDM(function=DriftDiffusionAnalytical(drift_rate=1.0, + threshold=1.0, + noise=0.5, + starting_point=0, + t0=0.45), + output_ports=[DECISION_VARIABLE, + RESPONSE_TIME, + PROBABILITY_UPPER_THRESHOLD], + name='Decision2') + + + comp = pnl.Composition(name="evc", retain_old_simulation_data=True) + comp.add_node(reward, required_roles=[pnl.NodeRole.OUTPUT]) + comp.add_node(Decision, required_roles=[pnl.NodeRole.OUTPUT]) + comp.add_node(Decision2, required_roles=[pnl.NodeRole.OUTPUT]) + task_execution_pathway = [Input, pnl.IDENTITY_MATRIX, Decision, Decision2] + comp.add_linear_processing_pathway(task_execution_pathway) + + pec = pnl.ParameterEstimationComposition(name='pec', + model = comp if model_spec else None, + nodes = comp if node_spec else None, + # data = [1,2,3], # For testing error + parameters={('drift_rate',Decision):[1,2], + ('threshold',Decision):[1,2],}, + # parameters={('shrimp_boo',Decision):[1,2], # For testing error + # ('scripblat',Decision2):[1,2],}, # For testing error + outcome_variables=[Decision.output_ports[DECISION_VARIABLE], + Decision.output_ports[RESPONSE_TIME]], + objective_function=objective_function_arg, + optimization_function=GridSearch, + num_estimates=3, + # controller_mode=AFTER, # For testing error + # enable_controller=False # For testing error + ) + ctlr = pec.controller + # pec.show_graph(show_node_structure=pnl.ALL) + assert ctlr.num_outcome_input_ports == 1 + if objective_function_arg: + # pec.show_graph(show_cim=True) + # pec.show_graph(show_node_structure=pnl.ALL) + assert ctlr.objective_mechanism # For objective_function specified + else: + # pec.show_graph(show_cim=True) + # pec.show_graph(show_node_structure=pnl.ALL) + assert not ctlr.objective_mechanism # For objective_function specified + assert len(ctlr.input_ports[pnl.OUTCOME].variable) == expected_input_len + assert len(ctlr.control_signals) == 3 + assert ctlr.function.num_estimates == 3 + assert pnl.RANDOMIZATION_CONTROL_SIGNAL in ctlr.control_signals.names + assert ctlr.control_signals[pnl.RANDOMIZATION_CONTROL_SIGNAL].allocation_samples.num == 3 + # pec.run() diff --git a/tests/composition/test_report.py b/tests/composition/test_report.py index ad4f7c3fcc8..82e79dbf443 100644 --- a/tests/composition/test_report.py +++ b/tests/composition/test_report.py @@ -1,9 +1,7 @@ -import contextlib -import io import sys -import pytest import numpy as np +import pytest import psyneulink as pnl from psyneulink.core.compositions.report import ReportOutput, ReportProgress, ReportSimulations, ReportDevices @@ -12,29 +10,25 @@ @pytest.mark.skipif(sys.platform == 'win32', reason="") class TestReport(): - def test_reportOutputPref_true(self): + def test_reportOutputPref_true(self, capsys): t = pnl.TransferMechanism() 
         t.reportOutputPref = ReportOutput.FULL
-        f = io.StringIO()
-        with contextlib.redirect_stdout(f):
-            t.execute(1)
-        output = f.getvalue()
+        t.execute(1)
+        output = capsys.readouterr().out
         assert 'input: 1.0' in output
         assert 'output: 1.0' in output
         assert 'params' not in output
-    def test_reportOutputPref_params(self):
+    def test_reportOutputPref_params(self, capsys):
         t = pnl.TransferMechanism()
         t.reportOutputPref = 'params'
-        f = io.StringIO()
-        with contextlib.redirect_stdout(f):
-            t.execute(1, report_output=ReportOutput.FULL)
-        output = f.getvalue()
+        t.execute(1, report_output=ReportOutput.FULL)
+        output = capsys.readouterr().out
         assert 'input: 1.0' in output
         assert 'output: 1.0' in output
diff --git a/tests/composition/test_runtime_params.py b/tests/composition/test_runtime_params.py
index 2e0cef09602..e1f1904ad54 100644
--- a/tests/composition/test_runtime_params.py
+++ b/tests/composition/test_runtime_params.py
@@ -165,7 +165,7 @@ def test_input_port_param_no_condition(self):
         C = Composition(pathways=[T1,T2])
         T1.function.slope.base = 5
-        T2.input_port.function.scale.base = 4
+        T2.input_port.function.scale = 4
         C.run(inputs={T1: 2.0},
               runtime_params={
                   T1: {'slope': 3},  # Mechanism's function (Linear) parameter
@@ -188,10 +188,10 @@ def test_input_port_param_no_condition(self):
         assert T2.parameter_ports['noise'].parameters.value.get(C) == 0.0
         assert T2.function.intercept.base == 0.0
         assert T2.function.parameters.intercept.get(C) == 0.0
-        assert T2.input_port.weight.base is None
-        assert T2.input_port.function.scale.base == 4.0
+        assert T2.input_port.weight is None
+        assert T2.input_port.function.scale == 4.0
         assert T2.input_port.function.parameters.scale.get(C) == 4.0
-        assert T2.input_port.function.weights.base is None
+        assert T2.input_port.function.weights is None
         assert T2.input_port.function.parameters.weights.get(C) is None
         C.run(inputs={T1: 2.0}, )
@@ -347,7 +347,7 @@ def test_mechanism_params_with_combined_conditions_for_all_INPUT_PORT_PARAMS(sel
         C = Composition(pathways=[T1,T2])
         T1.function.slope.base = 5
-        T2.input_port.function.scale.base = 4
+        T2.input_port.function.scale = 4
         C.run(inputs={T1: 2.0},
               runtime_params={
                   T1: {'slope': (3, AtTrial(1))},  # Condition on Mechanism's function (Linear) parameter
@@ -372,10 +372,10 @@ def test_mechanism_params_with_combined_conditions_for_all_INPUT_PORT_PARAMS(sel
         assert T2.parameter_ports['noise'].parameters.value.get(C) == 0.0
         assert T2.function.intercept.base == 0.0
         assert T2.function.parameters.intercept.get(C) == 0.0
-        assert T2.input_port.weight.base is None
-        assert T2.input_port.function.scale.base == 4.0
+        assert T2.input_port.weight is None
+        assert T2.input_port.function.scale == 4.0
         assert T2.input_port.function.parameters.scale.get(C) == 4.0
-        assert T2.input_port.function.weights.base is None
+        assert T2.input_port.function.weights is None
         assert T2.input_port.function.parameters.weights.get(C) is None
         # run again to insure restored default for noise after last run
@@ -396,7 +396,7 @@ def test_mechanism_params_with_combined_conditions_for_individual_INPUT_PORT_PAR
         C = Composition(pathways=[[T1,P,T2]])
         T1.function.slope.base = 5
-        T2.input_port.function.scale.base = 4
+        T2.input_port.function.scale = 4
         # Run 0: Test INPUT_PORT_PARAMS for InputPort function directly (scale) and in FUNCTION_PARAMS dict (weights)
         C.run(inputs={T1: 2.0},
               runtime_params={
@@ -466,10 +466,8 @@ def test_mechanism_params_with_combined_conditions_for_individual_INPUT_PORT_PAR
         assert T2.parameter_ports['noise'].parameters.value.get(C) == 0.0
         assert
T2.function.intercept.base == 0.0 assert T2.function.parameters.intercept.get(C) == 0.0 - assert T2.input_port.weight.base is None - assert T2.input_port.function.scale.base == 4.0 + assert T2.input_port.weight is None assert T2.input_port.function.parameters.scale.get(C) == 4.0 - assert T2.input_port.function.weights.base is None assert T2.input_port.function.parameters.weights.get(C) is None # Final Run: insure restored default for noise after last run @@ -503,10 +501,10 @@ def test_params_for_input_port_and_projection_variable_and_value(self): C = Composition(nodes=[SAMPLE_INPUT, TARGET_INPUT, CM], projections=[P1,P2]) SAMPLE_INPUT.function.slope.base = 3 - CM.input_ports[SAMPLE].function.scale.base = 2 + CM.input_ports[SAMPLE].function.scale = 2 - TARGET_INPUT.input_port.function.scale.base = 4 - CM.input_ports[TARGET].function.scale.base = 1.5 + TARGET_INPUT.input_port.function.scale = 4 + CM.input_ports[TARGET].function.scale = 1.5 C.run(inputs={SAMPLE_INPUT: 2.0, TARGET_INPUT: 5.0}, @@ -603,7 +601,7 @@ def test_params_for_output_port_variable_and_value(self): P2 = MappingProjection(sender=T1.output_ports['SECOND'], receiver=T2) C = Composition(nodes=[T1,T2], projections=[P1,P2]) - T1.output_ports['SECOND'].function.slope.base = 1.5 + T1.output_ports['SECOND'].function.slope = 1.5 # Run 0: Test of both OutputPort variables assigned C.run(inputs={T1: 10.0}, @@ -681,7 +679,7 @@ def test_composition_runtime_param_errors(self): C = Composition(nodes=[T1,T2,CM], projections=[P1,P2]) T1.function.slope.base = 3 - T2.input_port.function.scale.base = 4 + T2.input_port.function.scale = 4 # Bad param specified for Mechanism with pytest.raises(ComponentError) as error_text: diff --git a/tests/composition/test_show_graph.py b/tests/composition/test_show_graph.py index 0ca66e019f1..40a235a97cd 100644 --- a/tests/composition/test_show_graph.py +++ b/tests/composition/test_show_graph.py @@ -2,10 +2,9 @@ import pytest from psyneulink.core.components.functions.nonstateful.learningfunctions import BackPropagation -from psyneulink.core.components.functions.stateful.memoryfunctions import \ - STORAGE_PROB from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear from psyneulink.core.components.functions.stateful.memoryfunctions import DictionaryMemory +from psyneulink.core.components.functions.stateful.memoryfunctions import STORAGE_PROB from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import \ OptimizationControlMechanism @@ -21,6 +20,7 @@ from psyneulink.library.components.mechanisms.processing.integrator.episodicmemorymechanism import \ EpisodicMemoryMechanism, VALUE_INPUT, VALUE_OUTPUT, KEY_INPUT, KEY_OUTPUT +"""These test various elaborate forms of Composition configuration and nesting, in addition to show_graph itself""" class TestSimpleCompositions: def test_process(self): @@ -106,7 +106,7 @@ def test_multiple_projections_to_node_of_nested_composition(self): assert np.allclose(results,expected) gv = comp.show_graph(output_fmt='source') - assert gv == 'digraph "Composition-0" {\n\tgraph [label="Composition-0" overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tCONTEXT [color=green penwidth=3 rank=source shape=oval]\n\tSTIM [color=green penwidth=3 rank=source shape=oval]\n\tEM -> "STIM INPUT LAYER" [label="" arrowhead=normal color=black 
penwidth=1]\n\tSTIM -> "STIM INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTEXT -> "CONTEXT INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tEM -> "CONTEXT INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tEM [color=black penwidth=1 rank=same shape=oval]\n\tCONTEXT -> EM [label="" arrowhead=normal color=black penwidth=1]\n\t"ControlMechanism-0" -> EM [label="" arrowhead=box color=blue penwidth=1]\n\tSTIM -> EM [label="" arrowhead=normal color=black penwidth=1]\n\t"MATCH LAYER" -> DECISION [label="" arrowhead=normal color=black penwidth=1]\n\tDECISION -> "ControlMechanism-0" [label="" arrowhead=normal color=black penwidth=1]\n\tDECISION [color=red penwidth=3 rank=max shape=oval]\n\t"ControlMechanism-0" [color=blue penwidth=3 rank=max shape=octagon]\n\tsubgraph cluster_FFN {\n\t\tgraph [label=FFN overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"CONTEXT INPUT LAYER" [color=green penwidth=3 rank=source shape=oval]\n\t\t"STIM INPUT LAYER" [color=green penwidth=3 rank=source shape=oval]\n\t\t"CONTEXT INPUT LAYER" -> "MATCH LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"STIM INPUT LAYER" -> "MATCH LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"MATCH LAYER" [color=red penwidth=3 rank=max shape=oval]\n\t\tlabel=FFN\n\t}\n}' + assert gv.strip() == 'digraph "Composition-0" {\n\tgraph [label="Composition-0" overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tCONTEXT [color=green penwidth=3 rank=source shape=oval]\n\tSTIM [color=green penwidth=3 rank=source shape=oval]\n\tEM -> "STIM INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tSTIM -> "STIM INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTEXT -> "CONTEXT INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tEM -> "CONTEXT INPUT LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\tEM [color=black penwidth=1 rank=same shape=oval]\n\tCONTEXT -> EM [label="" arrowhead=normal color=black penwidth=1]\n\t"ControlMechanism-0" -> EM [label="" arrowhead=box color=blue penwidth=1]\n\tSTIM -> EM [label="" arrowhead=normal color=black penwidth=1]\n\t"MATCH LAYER" -> DECISION [label="" arrowhead=normal color=black penwidth=1]\n\tDECISION -> "ControlMechanism-0" [label="" arrowhead=normal color=black penwidth=1]\n\tDECISION [color=red penwidth=3 rank=max shape=oval]\n\t"ControlMechanism-0" [color=blue penwidth=3 rank=max shape=octagon]\n\tsubgraph cluster_FFN {\n\t\tgraph [label=FFN overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"CONTEXT INPUT LAYER" [color=green penwidth=3 rank=source shape=oval]\n\t\t"STIM INPUT LAYER" [color=green penwidth=3 rank=source shape=oval]\n\t\t"CONTEXT INPUT LAYER" -> "MATCH LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"STIM INPUT LAYER" -> "MATCH LAYER" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"MATCH LAYER" [color=red penwidth=3 rank=max shape=oval]\n\t\tlabel=FFN\n\t}\n}' # # FIX: ORDERING PROBLEM WITH SLOPE AND INTERCEPT ENTRIES # gv = comp.show_graph(show_cim=True, show_node_structure=ALL, output_fmt='source') @@ -241,11 +241,11 @@ def test_converging_pathways(self): ), ( {'show_controller': True}, - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False 
rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tia [color=green penwidth=3 rank=source shape=oval]\n\tia -> ib [label="" arrowhead=normal color=black penwidth=1]\n\t"my ocm" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm" -> ib [label="" arrowhead=box color=purple penwidth=1]\n\tib [color=red penwidth=3 rank=max shape=oval]\n\t"my ocm" [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}' + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tia [color=green penwidth=3 rank=source shape=oval]\n\tia -> ib [label="" arrowhead=normal color=black penwidth=1]\n\t"my ocm" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm" -> ib [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> "my ocm" [label="" arrowhead=normal color=purple penwidth=1]\n\tib [color=red penwidth=3 rank=max shape=oval]\n\t"my ocm" [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}' ), ( {'show_controller': True, 'show_node_structure': True}, - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tia:"OutputPort-RESULT" -> ib:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"my ocm":"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm":"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm":"OutputPort-ib[slope] ControlSignal" -> ib:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tib [label=<
RESULT
OutputPorts
Mechanism:
ib
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\t"my ocm" [label=<
ia[noise] ControlSignalia[intercept] ControlSignalib[slope] ControlSignal
OutputPorts
Mechanism:
my ocm
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}' + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tia:"OutputPort-RESULT" -> ib:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"my ocm":"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm":"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t"my ocm":"OutputPort-ib[slope] ControlSignal" -> ib:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> "my ocm":"InputPort-Shadowed input of of ia[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tib [label=<
RESULT
OutputPorts
Mechanism:
ib
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\t"my ocm" [label=<
ia[noise] ControlSignalia[intercept] ControlSignalib[slope] ControlSignal
OutputPorts
Mechanism:
my ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of ia[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}' ) ] @@ -270,7 +270,7 @@ def test_no_nested_and_controler_name_with_space_in_it( search_space=[[1],[1],[1]]) comp = Composition(name='ocomp', pathways=[ia, ib], controller=ocm) gv = comp.show_graph(output_fmt='source', **show_graph_kwargs) - assert gv == expected_output + assert gv.strip() == expected_output _multiple_nesting_levels_with_control_mech_projection_one_level_deep_data = [ ( @@ -325,7 +325,7 @@ def test_multiple_nesting_levels_with_control_mech_projection_one_level_deep( control_signals=[ControlSignal(projections=[(SLOPE, ma)])]) ocomp = Composition(name='ocomp', pathways=[oa, mcomp, ob, ctl_mech]) gv = ocomp.show_graph(output_fmt='source', **show_graph_kwargs) - assert gv == expected_output + assert gv.strip() == expected_output _nested_learning_data = [ ( @@ -377,40 +377,40 @@ def test_nested_learning(self, show_graph_kwargs, expected_output): ocomp = Composition(name='COMPOSITION', pathways=[input_mech, internal_mech, icomp, output_mech]) gv = ocomp.show_graph(output_fmt='source', **show_graph_kwargs) - assert gv == expected_output + assert gv.strip() == expected_output _nested_learning_test_with_user_specified_target_in_outer_composition_data = [ ( {'show_nested': False, 'show_cim': False, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE 
MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> CONTROLLER [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}' ), ( {'show_nested': NESTED, 'show_cim': False, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL -> "INNER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> Target [label="" arrowhead=normal color=orange penwidth=1]\n\t"INNER OUTPUT" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [color=orange penwidth=3 rank=min shape=oval]\n\t\t"INNER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\tComparator [color=orange penwidth=1 rank=min shape=oval]\n\t\t"INNER OUTPUT" -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=orange penwidth=1 rank=min shape=oval]\n\t\tComparator -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" 
arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL -> "INNER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> Target [label="" arrowhead=normal color=orange penwidth=1]\n\t"INNER OUTPUT" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> CONTROLLER [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [color=orange penwidth=3 rank=min shape=oval]\n\t\t"INNER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\tComparator [color=orange penwidth=1 rank=min shape=oval]\n\t\t"INNER OUTPUT" -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=orange penwidth=1 rank=min shape=oval]\n\t\tComparator -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' ), ( {'show_nested': False, 'show_cim': True, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False 
rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"COMPOSITION INPUT_CIM" -> "OUTER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> TARGET [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t"OUTER OUTPUT" -> "COMPOSITION OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"COMPOSITION INPUT_CIM" -> "OUTER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> TARGET [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t"OUTER OUTPUT" -> "COMPOSITION OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> CONTROLLER [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}' ), ( {'show_nested': NESTED, 'show_cim': True, 
'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL -> "NESTED COMPOSITION INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> "NESTED COMPOSITION INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION OUTPUT_CIM" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"COMPOSITION INPUT_CIM" -> "OUTER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> TARGET [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t"OUTER OUTPUT" -> "COMPOSITION OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [color=orange penwidth=3 rank=min shape=oval]\n\t\t"INNER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"NESTED COMPOSITION INPUT_CIM" -> "INNER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM" -> Target [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t"INNER OUTPUT" -> "NESTED COMPOSITION OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tComparator [color=orange penwidth=1 rank=min shape=oval]\n\t\t"INNER OUTPUT" -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=orange penwidth=1 rank=min shape=oval]\n\t\tComparator 
-> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [color=green penwidth=3 rank=source shape=oval]\n\t"OUTER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\tINTERNAL [color=black penwidth=1 rank=same shape=oval]\n\t"OUTER INPUT" -> INTERNAL [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL -> "NESTED COMPOSITION INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET -> "NESTED COMPOSITION INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION OUTPUT_CIM" -> "OUTER OUTPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"COMPOSITION INPUT_CIM" -> "OUTER INPUT" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> TARGET [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t"OUTER OUTPUT" -> "COMPOSITION OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER -> INTERNAL [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [color=purple penwidth=1 rank=min shape=oval]\n\t"OBJECTIVE MECHANISM" -> CONTROLLER [label="" color=purple penwidth=1]\n\t"OUTER INPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" -> "OBJECTIVE MECHANISM" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM" -> CONTROLLER [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\tCONTROLLER [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [color=orange penwidth=3 rank=min shape=oval]\n\t\t"INNER INPUT" [color=green penwidth=3 rank=source shape=oval]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"NESTED COMPOSITION INPUT_CIM" -> "INNER INPUT" [label="" arrowhead=normal color=black 
penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM" -> Target [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t"INNER OUTPUT" -> "NESTED COMPOSITION OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tComparator [color=orange penwidth=1 rank=min shape=oval]\n\t\t"INNER OUTPUT" -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget -> Comparator [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=orange penwidth=1 rank=min shape=oval]\n\t\tComparator -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [color=red penwidth=3 rank=max shape=oval]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' ), ( {'show_nested': False, 'show_cim': False, 'show_node_structure': True, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPort-0
OutputPorts
Mechanism:
TARGET
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPort-0
OutputPorts
Mechanism:
INTERNAL
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OUTCOME
OutputPorts
Mechanism:
OBJECTIVE MECHANISM
ParameterPorts
offset
scale
InputPorts
Value of OUTER INPUT [OutputPort-0]Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
INTERNAL[slope] ControlSignal
OutputPorts
Mechanism:
CONTROLLER
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPort-0
OutputPorts
Mechanism:
TARGET
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPort-0
OutputPorts
Mechanism:
INTERNAL
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OUTCOME
OutputPorts
Mechanism:
OBJECTIVE MECHANISM
ParameterPorts
offset
scale
InputPorts
Value of OUTER INPUT [OutputPort-0]Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> CONTROLLER:"InputPort-Shadowed input of of OUTER INPUT[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
INTERNAL[slope] ControlSignal
OutputPorts
Mechanism:
CONTROLLER
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of OUTER INPUT[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}' ), ( {'show_nested': NESTED, 'show_cim': False, 'show_node_structure': True, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPort-0
OutputPorts
Mechanism:
TARGET
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPort-0
OutputPorts
Mechanism:
INTERNAL
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "INNER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> Target:"InputPort-InputPort-0" [label="" arrowhead=normal color=orange penwidth=1]\n\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OUTCOME
OutputPorts
Mechanism:
OBJECTIVE MECHANISM
ParameterPorts
offset
scale
InputPorts
Value of OUTER INPUT [OutputPort-0]Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
INTERNAL[slope] ControlSignal
OutputPorts
Mechanism:
CONTROLLER
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [label=<
OutputPort-0
OutputPorts
Mechanism:
Target
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=orange penwidth=3 rank=min shape=plaintext]\n\t\t"INNER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
INNER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT":"InputPort-InputPort-0" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"OutputPort-LearningSignal" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\tComparator [label=<
OUTCOMEMSE
OutputPorts
Mechanism:
Comparator
ParameterPorts
offset
scale
InputPorts
SAMPLETARGET
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> Comparator:"InputPort-SAMPLE" [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget:"OutputPort-OutputPort-0" -> Comparator:"InputPort-TARGET" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label=<
error_signalLearningSignal
OutputPorts
Mechanism:
Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]
ParameterPorts
learning_rate
InputPorts
activation_inputactivation_outputerror_signal
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\tComparator:"OutputPort-OUTCOME" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-error_signal" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_input" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_output" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
INNER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPort-0
OutputPorts
Mechanism:
TARGET
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPort-0
OutputPorts
Mechanism:
INTERNAL
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "INNER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> Target:"InputPort-InputPort-0" [label="" arrowhead=normal color=orange penwidth=1]\n\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OUTCOME
OutputPorts
Mechanism:
OBJECTIVE MECHANISM
ParameterPorts
offset
scale
InputPorts
Value of OUTER INPUT [OutputPort-0]Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> CONTROLLER:"InputPort-Shadowed input of of OUTER INPUT[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
INTERNAL[slope] ControlSignal
OutputPorts
Mechanism:
CONTROLLER
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of OUTER INPUT[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [label=<
OutputPort-0
OutputPorts
Mechanism:
Target
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=orange penwidth=3 rank=min shape=plaintext]\n\t\t"INNER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
INNER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT":"InputPort-InputPort-0" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"OutputPort-LearningSignal" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\tComparator [label=<
OUTCOMEMSE
OutputPorts
Mechanism:
Comparator
ParameterPorts
offset
scale
InputPorts
SAMPLETARGET
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> Comparator:"InputPort-SAMPLE" [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget:"OutputPort-OutputPort-0" -> Comparator:"InputPort-TARGET" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label=<
error_signalLearningSignal
OutputPorts
Mechanism:
Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]
ParameterPorts
learning_rate
InputPorts
activation_inputactivation_outputerror_signal
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\tComparator:"OutputPort-OUTCOME" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-error_signal" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_input" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_output" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
INNER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' ), ( {'show_nested': False, 'show_cim': True, 'show_node_structure': True, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPort-0
OutputPorts
Mechanism:
TARGET
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER INPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPort-0
OutputPorts
Mechanism:
INTERNAL
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [label=<
INPUT_CIM_OUTER INPUT_InputPort-0INPUT_CIM_TARGET_InputPort-0
OutputPorts
Mechanism:
COMPOSITION Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> "OUTER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_TARGET_InputPort-0" -> TARGET:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [label=<
Mechanism:
COMPOSITION Output_CIM
InputPorts
OUTPUT_CIM_OUTER OUTPUT_OutputPort-0
> color=red penwidth=1 rank=same shape=plaintext]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "COMPOSITION OUTPUT_CIM":"InputPort-OUTPUT_CIM_OUTER OUTPUT_OutputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OUTCOME
OutputPorts
Mechanism:
OBJECTIVE MECHANISM
ParameterPorts
offset
scale
InputPorts
Value of OUTER INPUT [OutputPort-0]Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPort-0
OutputPorts
Mechanism:
OUTER OUTPUT
ParameterPorts
intercept
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
INTERNAL[slope] ControlSignal
OutputPorts
Mechanism:
CONTROLLER
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPorts: OutputPort-0 | Mechanism: TARGET | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: OUTER INPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPorts: OutputPort-0 | Mechanism: INTERNAL | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" [color=pink penwidth=3 rank=same shape=rectangle]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_OUTER INPUT_InputPort-0, INPUT_CIM_TARGET_InputPort-0 | Mechanism: COMPOSITION Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> "OUTER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_TARGET_InputPort-0" -> TARGET:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [label=<
Mechanism: COMPOSITION Output_CIM | InputPorts: OUTPUT_CIM_OUTER OUTPUT_OutputPort-0
> color=red penwidth=1 rank=same shape=plaintext]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "COMPOSITION OUTPUT_CIM":"InputPort-OUTPUT_CIM_OUTER OUTPUT_OutputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OutputPorts: OUTCOME | Mechanism: OBJECTIVE MECHANISM | ParameterPorts: offset, scale | InputPorts: Value of OUTER INPUT [OutputPort-0], Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> CONTROLLER:"InputPort-Shadowed input of of OUTER INPUT[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: OUTER OUTPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
OutputPorts: INTERNAL[slope] ControlSignal | Mechanism: CONTROLLER | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of OUTER INPUT[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}' ), ( {'show_nested': NESTED, 'show_cim': True, 'show_node_structure': True, 'show_learning': True}, - 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPorts: OutputPort-0 | Mechanism: TARGET | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: OUTER INPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPorts: OutputPort-0 | Mechanism: INTERNAL | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION INPUT_CIM":"InputPort-INPUT_CIM_INNER INPUT_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION INPUT_CIM":"InputPort-INPUT_CIM_Target_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION OUTPUT_CIM":"OutputPort-OUTPUT_CIM_INNER OUTPUT_OutputPort-0" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_OUTER INPUT_InputPort-0, INPUT_CIM_TARGET_InputPort-0 | Mechanism: COMPOSITION Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> "OUTER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_TARGET_InputPort-0" -> TARGET:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [label=<
Mechanism: COMPOSITION Output_CIM | InputPorts: OUTPUT_CIM_OUTER OUTPUT_OutputPort-0
> color=red penwidth=1 rank=same shape=plaintext]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "COMPOSITION OUTPUT_CIM":"InputPort-OUTPUT_CIM_OUTER OUTPUT_OutputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OutputPorts: OUTCOME | Mechanism: OBJECTIVE MECHANISM | ParameterPorts: offset, scale | InputPorts: Value of OUTER INPUT [OutputPort-0], Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: OUTER OUTPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
OutputPorts: INTERNAL[slope] ControlSignal | Mechanism: CONTROLLER | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [label=<
OutputPorts: OutputPort-0 | Mechanism: Target | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=orange penwidth=3 rank=min shape=plaintext]\n\t\t"INNER INPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: INNER INPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT":"InputPort-InputPort-0" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"OutputPort-LearningSignal" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_INNER INPUT_InputPort-0, INPUT_CIM_Target_InputPort-0 | Mechanism: NESTED COMPOSITION Input_CIM | InputPorts: INPUT_CIM_INNER INPUT_InputPort-0, INPUT_CIM_Target_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"NESTED COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_INNER INPUT_InputPort-0" -> "INNER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_Target_InputPort-0" -> Target:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION OUTPUT_CIM" [label=<
OutputPorts: OUTPUT_CIM_INNER OUTPUT_OutputPort-0 | Mechanism: NESTED COMPOSITION Output_CIM | InputPorts: OUTPUT_CIM_INNER OUTPUT_OutputPort-0
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "NESTED COMPOSITION OUTPUT_CIM":"InputPort-OUTPUT_CIM_INNER OUTPUT_OutputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\tComparator [label=<
OutputPorts: OUTCOME, MSE | Mechanism: Comparator | ParameterPorts: offset, scale | InputPorts: SAMPLE, TARGET
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> Comparator:"InputPort-SAMPLE" [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget:"OutputPort-OutputPort-0" -> Comparator:"InputPort-TARGET" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label=<
OutputPorts: error_signal, LearningSignal | Mechanism: Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0] | ParameterPorts: learning_rate | InputPorts: activation_input, activation_output, error_signal
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\tComparator:"OutputPort-OUTCOME" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-error_signal" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_input" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_output" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: INNER OUTPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' + 'digraph COMPOSITION {\n\tgraph [label=COMPOSITION overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\tTARGET [label=<
OutputPorts: OutputPort-0 | Mechanism: TARGET | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t"OUTER INPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: OUTER INPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tINTERNAL [label=<
OutputPorts: OutputPort-0 | Mechanism: INTERNAL | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=black penwidth=1 rank=same shape=plaintext]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> INTERNAL:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tINTERNAL:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION INPUT_CIM":"InputPort-INPUT_CIM_INNER INPUT_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tTARGET:"OutputPort-OutputPort-0" -> "NESTED COMPOSITION INPUT_CIM":"InputPort-INPUT_CIM_Target_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"NESTED COMPOSITION OUTPUT_CIM":"OutputPort-OUTPUT_CIM_INNER OUTPUT_OutputPort-0" -> "OUTER OUTPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_OUTER INPUT_InputPort-0, INPUT_CIM_TARGET_InputPort-0 | Mechanism: COMPOSITION Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> "OUTER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_TARGET_InputPort-0" -> TARGET:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"COMPOSITION OUTPUT_CIM" [label=<
Mechanism: COMPOSITION Output_CIM | InputPorts: OUTPUT_CIM_OUTER OUTPUT_OutputPort-0
> color=red penwidth=1 rank=same shape=plaintext]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "COMPOSITION OUTPUT_CIM":"InputPort-OUTPUT_CIM_OUTER OUTPUT_OutputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tCONTROLLER:"OutputPort-INTERNAL[slope] ControlSignal" -> INTERNAL:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"OBJECTIVE MECHANISM" [label=<
OutputPorts: OUTCOME | Mechanism: OBJECTIVE MECHANISM | ParameterPorts: offset, scale | InputPorts: Value of OUTER INPUT [OutputPort-0], Value of OUTER OUTPUT [OutputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\t"OBJECTIVE MECHANISM":"OutputPort-OUTCOME" -> CONTROLLER:"InputPort-OUTCOME" [label="" color=purple penwidth=1]\n\t"OUTER INPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER INPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"OUTER OUTPUT":"OutputPort-OutputPort-0" -> "OBJECTIVE MECHANISM":"InputPort-Value of OUTER OUTPUT [OutputPort-0]" [label="" color=purple penwidth=1]\n\t"COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_OUTER INPUT_InputPort-0" -> CONTROLLER:"InputPort-Shadowed input of of OUTER INPUT[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"OUTER OUTPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: OUTER OUTPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tCONTROLLER [label=<
OutputPorts: INTERNAL[slope] ControlSignal | Mechanism: CONTROLLER | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of OUTER INPUT[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph "cluster_NESTED COMPOSITION" {\n\t\tgraph [label="NESTED COMPOSITION" overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tTarget [label=<
OutputPorts: OutputPort-0 | Mechanism: Target | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=orange penwidth=3 rank=min shape=plaintext]\n\t\t"INNER INPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: INNER INPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [color=black penwidth=1 shape=diamond]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [arrowhead=none color=black penwidth=1]\n\t\t"MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" -> "INNER OUTPUT":"InputPort-InputPort-0" [color=black penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"OutputPort-LearningSignal" -> "MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label="" color=orange penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_INNER INPUT_InputPort-0, INPUT_CIM_Target_InputPort-0 | Mechanism: NESTED COMPOSITION Input_CIM | InputPorts: INPUT_CIM_INNER INPUT_InputPort-0, INPUT_CIM_Target_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"NESTED COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_INNER INPUT_InputPort-0" -> "INNER INPUT":"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION INPUT_CIM":"OutputPort-INPUT_CIM_Target_InputPort-0" -> Target:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"NESTED COMPOSITION OUTPUT_CIM" [label=<
OutputPorts: OUTPUT_CIM_INNER OUTPUT_OutputPort-0 | Mechanism: NESTED COMPOSITION Output_CIM | InputPorts: OUTPUT_CIM_INNER OUTPUT_OutputPort-0
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "NESTED COMPOSITION OUTPUT_CIM":"InputPort-OUTPUT_CIM_INNER OUTPUT_OutputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\tComparator [label=<
OutputPorts: OUTCOME, MSE | Mechanism: Comparator | ParameterPorts: offset, scale | InputPorts: SAMPLE, TARGET
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> Comparator:"InputPort-SAMPLE" [label="" arrowhead=normal color=orange penwidth=1]\n\t\tTarget:"OutputPort-OutputPort-0" -> Comparator:"InputPort-TARGET" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]" [label=<
OutputPorts: error_signal, LearningSignal | Mechanism: Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0] | ParameterPorts: learning_rate | InputPorts: activation_input, activation_output, error_signal
> color=orange penwidth=1 rank=min shape=plaintext]\n\t\tComparator:"OutputPort-OUTCOME" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-error_signal" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER INPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_input" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT":"OutputPort-OutputPort-0" -> "Learning Mechanism for MappingProjection from INNER INPUT[OutputPort-0] to INNER OUTPUT[InputPort-0]":"InputPort-activation_output" [label="" arrowhead=normal color=orange penwidth=1]\n\t\t"INNER OUTPUT" [label=<
OutputPorts: OutputPort-0 | Mechanism: INNER OUTPUT | ParameterPorts: intercept, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\t\tlabel="NESTED COMPOSITION"\n\t}\n}' ), ] @@ -449,7 +449,7 @@ def test_nested_learning_test_with_user_specified_target_in_outer_composition( assert icomp.input_CIM.output_ports['INPUT_CIM_Target_InputPort-0'].efferents[0].receiver.owner == p.target gv = ocomp.show_graph(output_fmt='source', **show_graph_kwargs) - assert gv == expected_output + assert gv.strip() == expected_output # def test_nested_learning_test_with_user_specified_target_in_outer_composition_using_pathway_notation(self): # ia = ProcessingMechanism(name='INNER INPUT') @@ -497,23 +497,23 @@ def test_nested_learning_test_with_user_specified_target_in_outer_composition( {'show_node_structure': True, 'show_nested': NESTED}, {'show_node_structure': True, 'show_cim': True, 'show_nested': False}, {'show_node_structure': True, 'show_cim': True, 'show_nested': INSET}, - {'show_node_structure': True, 'show_cim': True, 'show_nested': NESTED}, + {'show_node_structure': True, 'show_cim': True, 'show_nested': NESTED} ] # each item corresponds to the same item in _nested_show_graph_kwargs above _of_show_nested_show_cim_and_show_node_structure_expected_outputs = [ - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 
shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> ia [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> ia [label="" arrowhead=box color=blue penwidth=1]\n\tia -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal 
color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> "icomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT_CIM" -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black 
penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
OutputPorts: RESULT | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tob [label=<
OutputPorts: RESULT | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
OutputPorts: RESULT | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
OutputPorts: ia[slope] ControlSignal | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
OutputPorts: ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
OutputPorts: RESULT | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tob [label=<
OutputPorts: RESULT | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
OutputPorts: RESULT | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
OutputPorts: ia[slope] ControlSignal | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
OutputPorts: ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
OutputPorts: RESULT | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
OutputPorts: RESULT | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\tia:"OutputPort-RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tob [label=<
OutputPorts: RESULT | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
OutputPorts: RESULT | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
OutputPorts: ia[slope] ControlSignal | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
OutputPorts: ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
OutputPorts: RESULT | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
OutputPorts: RESULT | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_ob_InputPort-0, INPUT_CIM_oa_InputPort-0 | Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM | InputPorts: OUTPUT_CIM_ob_RESULT, OUTPUT_CIM_oc_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tob [label=<
OutputPorts: RESULT | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
OutputPorts: RESULT | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
OutputPorts: ia[slope] ControlSignal | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
OutputPorts: ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
OutputPorts: RESULT | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_ob_InputPort-0, INPUT_CIM_oa_InputPort-0 | Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM | InputPorts: OUTPUT_CIM_ob_RESULT, OUTPUT_CIM_oc_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tob [label=<
OutputPorts: RESULT | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
OutputPorts: RESULT | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
OutputPorts: ia[slope] ControlSignal | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
OutputPorts: ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_ia_InputPort-0 | Mechanism: icomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [label=<
OutputPorts: PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope | Mechanism: icomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [label=<
Mechanism: icomp Output_CIM | InputPorts: OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
OutputPorts: RESULT | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
OutputPorts: RESULT | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> "icomp INPUT_CIM":"InputPort-INPUT_CIM_ia_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT_CIM":"OutputPort-OUTPUT_CIM_ia_RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_ob_InputPort-0, INPUT_CIM_oa_InputPort-0 | Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM | InputPorts: OUTPUT_CIM_ob_RESULT, OUTPUT_CIM_oc_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\tob [label=<
OutputPorts: RESULT | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
OutputPorts: RESULT | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
OutputPorts: ia[slope] ControlSignal | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
OutputPorts: ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [label=<
OutputPorts: INPUT_CIM_ia_InputPort-0 | Mechanism: icomp Input_CIM | InputPorts: INPUT_CIM_ia_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [label=<
OutputPorts: PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope | Mechanism: icomp Parameter_CIM | InputPorts: PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [label=<
OutputPorts: OUTPUT_CIM_ia_RESULT | Mechanism: icomp Output_CIM | InputPorts: OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
OutputPorts: RESULT | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> ia [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> ia [label="" arrowhead=box color=blue penwidth=1]\n\tia -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max 
shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm 
[color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> "icomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT_CIM" -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode 
[color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEShadowed input of of ob[InputPort-0]Shadowed input of of oa[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEShadowed input of of ob[InputPort-0]Shadowed input of of oa[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\tia:"OutputPort-RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEShadowed input of of ob[InputPort-0]Shadowed input of of oa[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_ob_InputPort-0INPUT_CIM_oa_InputPort-0 OutputPorts Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM InputPorts OUTPUT_CIM_ob_RESULTOUTPUT_CIM_oc_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEShadowed input of of ob[InputPort-0]Shadowed input of of oa[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_ob_InputPort-0INPUT_CIM_oa_InputPort-0 OutputPorts Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM InputPorts OUTPUT_CIM_ob_RESULTOUTPUT_CIM_oc_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEShadowed input of of ob[InputPort-0]Shadowed input of of oa[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0 OutputPorts Mechanism: icomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope OutputPorts Mechanism: icomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [label=<
Mechanism: icomp Output_CIM InputPorts OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> "icomp INPUT_CIM":"InputPort-INPUT_CIM_ia_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT_CIM":"OutputPort-OUTPUT_CIM_ia_RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_ob_InputPort-0INPUT_CIM_oa_InputPort-0 OutputPorts Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM InputPorts OUTPUT_CIM_ob_RESULTOUTPUT_CIM_oc_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEShadowed input of of ob[InputPort-0]Shadowed input of of oa[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0 OutputPorts Mechanism: icomp Input_CIM InputPorts INPUT_CIM_ia_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope OutputPorts Mechanism: icomp Parameter_CIM InputPorts PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [label=<
OUTPUT_CIM_ia_RESULT OutputPorts Mechanism: icomp Output_CIM InputPorts OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}' ] @pytest.mark.parametrize( @@ -552,22 +552,22 @@ def test_of_show_nested_show_cim_and_show_node_structure( ocomp.add_controller(ocm) gv = ocomp.show_graph(output_fmt='source', **show_graph_kwargs) - assert gv == expected_output + assert gv.strip() == expected_output # each item corresponds to the same item in _nested_show_graph_kwargs above _of_show_3_level_nested_show_cim_and_show_node_structure_outputs = [ - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> ma [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> ia [label="" arrowhead=box color=blue penwidth=1]\n\toc -> ctl_mech 
[label="" arrowhead=normal color=black penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\tma -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal 
color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp INPUT_CIM" -> ma [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp PARAMETER_CIM" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\ticomp -> "midcomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> "midcomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> "midcomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t"midcomp OUTPUT_CIM" -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> "midcomp PARAMETER_CIM" [label="" arrowhead=normal color=purple 
penwidth=1]\n\tocm -> "midcomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\tma -> "icomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp INPUT_CIM" -> ma [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp PARAMETER_CIM" -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t"icomp OUTPUT_CIM" -> "midcomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT OutputPorts Mechanism: ma ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> ma:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT OutputPorts Mechanism: ma ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\tma:"OutputPort-RESULT" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0 OutputPorts Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM InputPorts OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0 OutputPorts Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM InputPorts OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT OutputPorts Mechanism: ma ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [label=<
INPUT_CIM_ma_InputPort-0 OutputPorts Mechanism: midcomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp INPUT_CIM":"OutputPort-INPUT_CIM_ma_InputPort-0" -> ma:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_slopePARAMETER_CIM_ia_noisePARAMETER_CIM_ia_intercept OutputPorts Mechanism: midcomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [label=<
Mechanism: midcomp Output_CIM InputPorts OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\ticomp -> "midcomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0 OutputPorts Mechanism: icomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope OutputPorts Mechanism: icomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [label=<
Mechanism: icomp Output_CIM InputPorts OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT OutputPorts Mechanism: oa ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> "midcomp INPUT_CIM":"InputPort-INPUT_CIM_ma_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> "midcomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t"midcomp OUTPUT_CIM":"OutputPort-OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0 OutputPorts Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM InputPorts OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> "midcomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> "midcomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT OutputPorts Mechanism: oc ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal OutputPorts Mechanism: ctl_mech InputPorts OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT OutputPorts Mechanism: ob ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal OutputPorts Mechanism: ocm ParameterPorts seed InputPorts OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT OutputPorts Mechanism: ma ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\tma:"OutputPort-RESULT" -> "icomp INPUT_CIM":"InputPort-INPUT_CIM_ia_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [label=<
INPUT_CIM_ma_InputPort-0 OutputPorts Mechanism: midcomp Input_CIM InputPorts INPUT_CIM_ma_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp INPUT_CIM":"OutputPort-INPUT_CIM_ma_InputPort-0" -> ma:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_slopePARAMETER_CIM_ia_noisePARAMETER_CIM_ia_intercept OutputPorts Mechanism: midcomp Parameter_CIM InputPorts PARAMETER_CIM_ia_slopePARAMETER_CIM_ia_noisePARAMETER_CIM_ia_intercept
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [label=<
OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT OutputPorts Mechanism: midcomp Output_CIM InputPorts OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t"icomp OUTPUT_CIM":"OutputPort-OUTPUT_CIM_ia_RESULT" -> "midcomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0 OutputPorts Mechanism: icomp Input_CIM InputPorts INPUT_CIM_ia_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope OutputPorts Mechanism: icomp Parameter_CIM InputPorts PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [label=<
OUTPUT_CIM_ia_RESULT OutputPorts Mechanism: icomp Output_CIM InputPorts OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [label=<
RESULT OutputPorts Mechanism: ia ParameterPorts intercept noise offset rate slope InputPorts InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> ma [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> ia [label="" arrowhead=box color=blue penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black 
penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\tma -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> midcomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black 
penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp INPUT_CIM" -> ma [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp PARAMETER_CIM" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\ticomp -> "midcomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> "midcomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> "midcomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t"midcomp OUTPUT_CIM" -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same 
shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> "midcomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> "midcomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [color=green penwidth=3 rank=source shape=oval]\n\t\tma -> "icomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp INPUT_CIM" -> ma [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"midcomp PARAMETER_CIM" -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM" -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t"icomp OUTPUT_CIM" -> "midcomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of oa[InputPort-0]Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of oa[InputPort-0]Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT
OutputPorts
Mechanism:
ma
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> ma:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of oa[InputPort-0]Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT
OutputPorts
Mechanism:
ma
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\tma:"OutputPort-RESULT" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0
OutputPorts
Mechanism:
ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism:
ocomp Output_CIM
InputPorts
OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of oa[InputPort-0]Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\tmidcomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> midcomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> midcomp [label="" arrowhead=normal color=black penwidth=1]\n\tmidcomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0
OutputPorts
Mechanism:
ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism:
ocomp Output_CIM
InputPorts
OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> midcomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of oa[InputPort-0]Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT
OutputPorts
Mechanism:
ma
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\ticomp [color=red penwidth=3 rank=max shape=rectangle]\n\t\tma:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [label=<
INPUT_CIM_ma_InputPort-0
OutputPorts
Mechanism:
midcomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp INPUT_CIM":"OutputPort-INPUT_CIM_ma_InputPort-0" -> ma:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_slopePARAMETER_CIM_ia_noisePARAMETER_CIM_ia_intercept
OutputPorts
Mechanism:
midcomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [label=<
Mechanism:
midcomp Output_CIM
InputPorts
OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\ticomp -> "midcomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0
OutputPorts
Mechanism:
icomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope
OutputPorts
Mechanism:
icomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [label=<
Mechanism:
icomp Output_CIM
InputPorts
OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> "midcomp INPUT_CIM":"InputPort-INPUT_CIM_ma_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> "midcomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t"midcomp OUTPUT_CIM":"OutputPort-OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0
OutputPorts
Mechanism:
ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism:
ocomp Output_CIM
InputPorts
OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> "midcomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> "midcomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEShadowed input of of oa[InputPort-0]Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_midcomp {\n\t\tgraph [label=midcomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tma [label=<
RESULT
OutputPorts
Mechanism:
ma
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\t\tma:"OutputPort-RESULT" -> "icomp INPUT_CIM":"InputPort-INPUT_CIM_ia_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp INPUT_CIM" [label=<
INPUT_CIM_ma_InputPort-0
OutputPorts
Mechanism:
midcomp Input_CIM
InputPorts
INPUT_CIM_ma_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp INPUT_CIM":"OutputPort-INPUT_CIM_ma_InputPort-0" -> ma:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"midcomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_slopePARAMETER_CIM_ia_noisePARAMETER_CIM_ia_intercept
OutputPorts
Mechanism:
midcomp Parameter_CIM
InputPorts
PARAMETER_CIM_ia_slopePARAMETER_CIM_ia_noisePARAMETER_CIM_ia_intercept
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> "icomp PARAMETER_CIM":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\t\t"midcomp OUTPUT_CIM" [label=<
OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT
OutputPorts
Mechanism:
midcomp Output_CIM
InputPorts
OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t"icomp OUTPUT_CIM":"OutputPort-OUTPUT_CIM_ia_RESULT" -> "midcomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_icomp_OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tsubgraph cluster_icomp {\n\t\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\t\tedge [fontname=arial fontsize=10]\n\t\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0
OutputPorts
Mechanism:
icomp Input_CIM
InputPorts
INPUT_CIM_ia_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope
OutputPorts
Mechanism:
icomp Parameter_CIM
InputPorts
PARAMETER_CIM_ia_interceptPARAMETER_CIM_ia_noisePARAMETER_CIM_ia_slope
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t\t"icomp OUTPUT_CIM" [label=<
OUTPUT_CIM_ia_RESULT
OutputPorts
Mechanism:
icomp Output_CIM
InputPorts
OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\t\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\t\tcolor=red\n\t\t\tlabel=icomp\n\t\t}\n\t\tlabel=midcomp\n\t}\n}',
 ]
 @pytest.mark.parametrize(
@@ -606,23 +606,26 @@ def test_of_show_3_level_nested_show_cim_and_show_node_structure(
             ],
             search_space=[[1],[1],[1]])
         ocomp.add_controller(ocm)
-        gv = ocomp.show_graph(output_fmt='source', **show_graph_kwargs)
-        assert gv == expected_output
+        assert ocomp.controller.outcome_input_ports[0].path_afferents == []
+        assert len(ocomp.controller.state_input_ports) == 2
+        assert all([node in [input_port.shadow_inputs.owner for input_port in ocomp.controller.state_input_ports]
+                    for node in {oa, ob}])
+        assert gv.strip() == expected_output
 # each item corresponds to the same item in _nested_show_graph_kwargs above
 _of_show_nested_show_cim_and_show_node_structure_with_singleton_in_outer_comp_added_last_outputs = [
-    'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}',
-    'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}',
-    'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> ia [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> ia [label="" arrowhead=box color=blue penwidth=1]\n\tia -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" 
arrowhead=normal color=black penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple 
penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> "icomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT_CIM" -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\tia:"OutputPort-RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
RESULT
OutputPorts
Mechanism:
ia
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0
OutputPorts
Mechanism:
ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism:
ocomp Output_CIM
InputPorts
OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT
OutputPorts
Mechanism:
oc
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal
OutputPorts
Mechanism:
ctl_mech
InputPorts
OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT
OutputPorts
Mechanism:
ob
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignalia[intercept] ControlSignaloa[slope] ControlSignal
OutputPorts
Mechanism:
ocm
ParameterPorts
seed
InputPorts
OUTCOMEOUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n}', - 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT
OutputPorts
Mechanism:
oa
ParameterPorts
intercept
noise
offset
rate
slope
InputPorts
InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0INPUT_CIM_ob_InputPort-0
OutputPorts
Mechanism:
ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism:
ocomp Output_CIM
InputPorts
OUTPUT_CIM_oc_RESULTOUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0 | OutputPorts | Mechanism: icomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope | OutputPorts | Mechanism: icomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [label=<
Mechanism: icomp Output_CIM | InputPorts: OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
RESULT | OutputPorts | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> ia [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> ia [label="" arrowhead=box color=blue penwidth=1]\n\tia -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> ia [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max 
shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple 
penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [color=green penwidth=3 rank=source shape=oval]\n\toa -> "icomp INPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT_CIM" -> oc [label="" arrowhead=normal color=black penwidth=1]\n\toc -> ctl_mech [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t"ocomp INPUT_CIM" -> oa [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" -> ob [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\toc -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tob -> "ocomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> "icomp PARAMETER_CIM" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm -> oa [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM" -> ocm [label="" arrowhead=normal color=purple penwidth=1]\n\toc [color=red penwidth=3 rank=max shape=oval]\n\tctl_mech [color=blue penwidth=3 rank=max shape=octagon]\n\tob [color=brown penwidth=3 rank=same shape=oval]\n\tocm [color=purple penwidth=1 rank=min shape=doubleoctagon]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [color=green penwidth=1 rank=same shape=rectangle]\n\t\t"icomp INPUT_CIM" -> ia [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [color=purple penwidth=1 rank=same shape=rectangle]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM" -> ia [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [color=red penwidth=1 rank=same shape=rectangle]\n\t\tia -> "icomp OUTPUT_CIM" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [color=brown penwidth=3 rank=same shape=oval]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black 
fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT | OutputPorts | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of oa[InputPort-0], Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT | OutputPorts | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of oa[InputPort-0], Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
RESULT | OutputPorts | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT | OutputPorts | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\tia:"OutputPort-RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of oa[InputPort-0], Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\tia [label=<
RESULT | OutputPorts | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT | OutputPorts | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=box color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0, INPUT_CIM_ob_InputPort-0 | OutputPorts | Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM | InputPorts: OUTPUT_CIM_oc_RESULT, OUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=box color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of oa[InputPort-0], Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n}', + 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT | OutputPorts | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\ticomp [color=pink penwidth=3 rank=same shape=rectangle]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> icomp [label="" arrowhead=normal color=blue penwidth=1]\n\toa:"OutputPort-RESULT" -> icomp [label="" arrowhead=normal color=black penwidth=1]\n\ticomp -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM" [label=<
INPUT_CIM_oa_InputPort-0, INPUT_CIM_ob_InputPort-0 | OutputPorts | Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT_CIM" [label=<
Mechanism: ocomp Output_CIM | InputPorts: OUTPUT_CIM_oc_RESULT, OUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> icomp [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_oa_InputPort-0" -> ocm:"InputPort-Shadowed input of of oa[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\t"ocomp INPUT_CIM":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ocm:"InputPort-Shadowed input of of ob[InputPort-0]" [label="" arrowhead=normal color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | ParameterPorts: seed | InputPorts: OUTCOME, Shadowed input of of oa[InputPort-0], Shadowed input of of ob[InputPort-0]
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT_CIM" [label=<
INPUT_CIM_ia_InputPort-0 | OutputPorts | Mechanism: icomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT_CIM":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp PARAMETER_CIM" [label=<
PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope | OutputPorts | Mechanism: icomp Parameter_CIM
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp PARAMETER_CIM":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT_CIM" [label=<
Mechanism: icomp Output_CIM | InputPorts: OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT_CIM":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
RESULT | OutputPorts | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}', # FIX: NEEDS TO BE CORRECTED ONCE BUG IS FIXED (SEE MESSAGE FOR COMMIT eb61303808ad2a5ba46fdd18d0e583283397915c) # 'digraph ocomp {\n\tgraph [label=ocomp overlap=False rankdir=BT]\n\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\tedge [fontname=arial fontsize=10]\n\toa [label=<
RESULT | OutputPorts | Mechanism: oa | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=green penwidth=3 rank=source shape=plaintext]\n\toa:"OutputPort-RESULT" -> "icomp INPUT":"InputPort-INPUT_CIM_ia_InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\tctl_mech:"OutputPort-ia[slope] ControlSignal" -> "icomp CONTROL":"InputPort-PARAMETER_CIM_ia_slope" [label="" arrowhead=normal color=blue penwidth=1]\n\t"icomp OUTPUT":"OutputPort-OUTPUT_CIM_ia_RESULT" -> oc:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\toc:"OutputPort-RESULT" -> ctl_mech:"InputPort-OUTCOME" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT" [label=<
INPUT_CIM_oa_InputPort-0, INPUT_CIM_ob_InputPort-0 | OutputPorts | Mechanism: ocomp Input_CIM
> color=green penwidth=1 rank=same shape=plaintext]\n\t"ocomp INPUT":"OutputPort-INPUT_CIM_oa_InputPort-0" -> oa:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp INPUT":"OutputPort-INPUT_CIM_ob_InputPort-0" -> ob:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t"ocomp OUTPUT" [label=<
Mechanism: ocomp Output_CIM | InputPorts: OUTPUT_CIM_oc_RESULT, OUTPUT_CIM_ob_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\toc:"OutputPort-RESULT" -> "ocomp OUTPUT":"InputPort-OUTPUT_CIM_oc_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tob:"OutputPort-RESULT" -> "ocomp OUTPUT":"InputPort-OUTPUT_CIM_ob_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\tocm:"OutputPort-ia[noise] ControlSignal" -> "icomp CONTROL":"InputPort-PARAMETER_CIM_ia_noise" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-ia[intercept] ControlSignal" -> "icomp CONTROL":"InputPort-PARAMETER_CIM_ia_intercept" [label="" arrowhead=normal color=purple penwidth=1]\n\tocm:"OutputPort-oa[slope] ControlSignal" -> oa:"ParameterPort-slope" [label="" arrowhead=box color=purple penwidth=1]\n\toc [label=<
RESULT | OutputPorts | Mechanism: oc | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=red penwidth=3 rank=max shape=plaintext]\n\tctl_mech [label=<
ia[slope] ControlSignal | OutputPorts | Mechanism: ctl_mech | InputPorts: OUTCOME
> color=blue penwidth=3 rank=max shape=plaintext]\n\tob [label=<
RESULT | OutputPorts | Mechanism: ob | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\tocm [label=<
ia[noise] ControlSignal, ia[intercept] ControlSignal, oa[slope] ControlSignal | OutputPorts | Mechanism: ocm | InputPorts: OUTCOME, OUTCOME-1
> color=purple penwidth=1 rank=min shape=plaintext]\n\tsubgraph cluster_icomp {\n\t\tgraph [label=icomp overlap=False rankdir=BT]\n\t\tnode [color=black fontname=arial fontsize=12 penwidth=1 shape=record]\n\t\tedge [fontname=arial fontsize=10]\n\t\t"icomp INPUT" [label=<
INPUT_CIM_ia_InputPort-0 | OutputPorts | Mechanism: icomp Input_CIM | InputPorts: INPUT_CIM_ia_InputPort-0
> color=green penwidth=1 rank=same shape=plaintext]\n\t\t"icomp INPUT":"OutputPort-INPUT_CIM_ia_InputPort-0" -> ia:"InputPort-InputPort-0" [label="" arrowhead=normal color=black penwidth=1]\n\t\t"icomp CONTROL" [label=<
PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope | OutputPorts | Mechanism: icomp Parameter_CIM | InputPorts: PARAMETER_CIM_ia_intercept, PARAMETER_CIM_ia_noise, PARAMETER_CIM_ia_slope
> color=purple penwidth=1 rank=same shape=plaintext]\n\t\t"icomp CONTROL":"OutputPort-PARAMETER_CIM_ia_intercept" -> ia:"ParameterPort-intercept" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp CONTROL":"OutputPort-PARAMETER_CIM_ia_noise" -> ia:"ParameterPort-noise" [label="" arrowhead=box color=purple penwidth=1]\n\t\t"icomp CONTROL":"OutputPort-PARAMETER_CIM_ia_slope" -> ia:"ParameterPort-slope" [label="" arrowhead=box color=blue penwidth=1]\n\t\t"icomp OUTPUT" [label=<
OUTPUT_CIM_ia_RESULT | OutputPorts | Mechanism: icomp Output_CIM | InputPorts: OUTPUT_CIM_ia_RESULT
> color=red penwidth=1 rank=same shape=plaintext]\n\t\tia:"OutputPort-RESULT" -> "icomp OUTPUT":"InputPort-OUTPUT_CIM_ia_RESULT" [label="" arrowhead=normal color=black penwidth=1]\n\t\tia [label=<
RESULT | OutputPorts | Mechanism: ia | ParameterPorts: intercept, noise, offset, rate, slope | InputPorts: InputPort-0
> color=brown penwidth=3 rank=same shape=plaintext]\n\t\tlabel=icomp\n\t}\n}'
]
@@ -662,4 +665,4 @@ def test_of_show_nested_show_cim_and_show_node_structure_with_singleton_in_outer
                               search_space=[[1],[1],[1]])
     ocomp.add_controller(ocm)
     gv = ocomp.show_graph(output_fmt='source', **show_graph_kwargs)
-    assert gv == expected_output
+    assert gv.strip() == expected_output
diff --git a/tests/functions/test_combination.py b/tests/functions/test_combination.py
index 96d75490d5b..f564b20abbe 100644
--- a/tests/functions/test_combination.py
+++ b/tests/functions/test_combination.py
@@ -150,6 +150,7 @@ def test_matrix(self):
 #
 #     print("mech = ", R_mechanism.execute([[[1, 2], [3, 4, 5], [6, 7, 8, 9]]]))
 #
+
 SIZE=5
 np.random.seed(0)
 #This gives us the correct 2d array
diff --git a/tests/functions/test_distance.py b/tests/functions/test_distance.py
index 9aed1fa88fd..d2c643ac906 100644
--- a/tests/functions/test_distance.py
+++ b/tests/functions/test_distance.py
@@ -17,6 +17,7 @@ def correlation(v1,v2):
     v2_norm = v2 - np.mean(v2)
     return np.sum(v1_norm * v2_norm) / np.sqrt(np.sum(v1_norm**2) * np.sum(v2_norm**2))
 
+
 test_data = [
     (kw.MAX_ABS_DIFF, False, None, np.max(abs(v1 - v2))),
     (kw.MAX_ABS_DIFF, True, None, np.max(abs(v1 - v2))),
diff --git a/tests/functions/test_distribution.py b/tests/functions/test_distribution.py
index 35df6974d54..12f5d64d16b 100644
--- a/tests/functions/test_distribution.py
+++ b/tests/functions/test_distribution.py
@@ -3,6 +3,7 @@
 import psyneulink.core.llvm as pnlvm
 import psyneulink.core.components.functions.nonstateful.distributionfunctions as Functions
+from psyneulink.core.globals.utilities import _SeededPhilox
 
 np.random.seed(0)
 test_var = np.random.rand()
@@ -26,6 +27,13 @@
 # Two tests with different inputs to show that input is ignored.
(Functions.NormalDist, 1e14, {"mean": RAND1, "standard_deviation": RAND2}, None, (1.0890232855122397)), (Functions.NormalDist, 1e-4, {"mean": RAND1, "standard_deviation": RAND2}, None, (1.0890232855122397)), + (Functions.UniformDist, 1e14, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6879771504250405)), + (Functions.UniformDist, 1e-4, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6879771504250405)), + # Inf inputs select Philox PRNG, test_var should never be inf + (Functions.NormalDist, np.inf, {"mean": RAND1, "standard_deviation": RAND2}, None, (0.5910357654927911)), + (Functions.NormalDist, -np.inf, {"mean": RAND1, "standard_deviation": RAND2}, None, (0.5910357654927911)), + (Functions.UniformDist, np.inf, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6043448764869507)), + (Functions.UniformDist, -np.inf, {"low": min(RAND1, RAND2), "high": max(RAND1, RAND2)}, None, (0.6043448764869507)), ] # use list, naming function produces ugly names @@ -36,8 +44,15 @@ # "DriftDiffusionAnalytical-SmallDriftRate", "NormalDist1", "NormalDist2", + "UniformDist1", + "UniformDist2", + "NormalDist1 Philox", + "NormalDist2 Philox", + "UniformDist1 Philox", + "UniformDist2 Philox", ] + @pytest.mark.function @pytest.mark.transfer_function @pytest.mark.benchmark @@ -46,7 +61,11 @@ def test_execute(func, variable, params, llvm_skip, expected, benchmark, func_mo benchmark.group = "TransferFunction " + func.componentName if func_mode != 'Python' and llvm_skip: pytest.skip(llvm_skip) + f = func(default_variable=variable, **params) + if np.isinf(variable): + f.parameters.random_state.set(_SeededPhilox([0])) + ex = pytest.helpers.get_func_execution(f, func_mode) res = ex(variable) diff --git a/tests/functions/test_integrator.py b/tests/functions/test_integrator.py index 13fabf86a30..78f19dabcb4 100644 --- a/tests/functions/test_integrator.py +++ b/tests/functions/test_integrator.py @@ -131,6 +131,7 @@ def LeakyFun(init, value, iterations, noise, **kwargs): return [3.12748415, 2.76778478, 2.45911505, 3.06686514, 1.6311395, 2.19281309, 1.61148745, 3.23404557, 2.81418859, 2.63042344] + GROUP_PREFIX="IntegratorFunction " @@ -220,6 +221,7 @@ def test_integrator_function_with_default_variable_and_params_of_different_lengt assert error_msg_a in str(error_text.value) assert error_msg_b in str(error_text.value) + err_msg_initializer = "'initializer' must be a list or 1d array of length 3 (the value of the 'dimension' parameter minus 1)" err_msg_angle_func = 'Variable shape incompatibility between (DriftOnASphereIntegrator DriftOnASphereIntegrator' err_msg_noise = "must be a list or 1d array of length 3 (the value of the 'dimension' parameter minus 1)" diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index adb831c0448..c3f61f6e590 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -6,6 +6,7 @@ import psyneulink.core.components.functions.stateful.memoryfunctions as Functions import psyneulink.core.llvm as pnlvm from psyneulink import * +from psyneulink.core.globals.utilities import _SeededPhilox # ********************************************************************************************************************** # OMINBUS TEST ********************************************************************************************************* @@ -17,29 +18,45 @@ np.random.seed(0) SIZE=10 test_var = np.random.rand(2, SIZE) +#TODO: Initializer should use different values to test recall test_initializer = 
np.array([[test_var[0], test_var[1]]]) test_noise_arr = np.random.rand(SIZE) RAND1 = np.random.random(1) RAND2 = np.random.random() +philox_var = np.random.rand(2, SIZE) +#TODO: Initializer should use different values to test recall +philox_initializer = np.array([[philox_var[0], philox_var[1]]]) + test_data = [ # Default initializer does not work # (Functions.Buffer, test_var, {'rate':RAND1}, [[0.0],[0.0]]), - (Functions.Buffer, test_var[0], {'history':512, 'rate':RAND1, 'initializer':[test_var[0]]}, [[0.03841128, 0.05005587, 0.04218721, 0.0381362 , 0.02965146, 0.04520592, 0.03062659, 0.0624149 , 0.06744644, 0.02683695],[0.14519169, 0.18920736, 0.15946443, 0.1441519 , 0.11208025, 0.17087491, 0.11576615, 0.23592355, 0.25494239, 0.10144161]]), - (Functions.DictionaryMemory, test_var, {'rate':RAND1, 'seed': module_seed}, [[ - 0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], [ - 0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]]), - (Functions.DictionaryMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.5, 'seed': module_seed}, - [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]]), - (Functions.DictionaryMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed}, - [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]]), - (Functions.DictionaryMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, [[ - 0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], [ - 0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]]), - (Functions.DictionaryMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, [[ - 0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], [ - 0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]]), + pytest.param(Functions.Buffer, test_var[0], {'history':512, 'rate':RAND1, 'initializer':[test_var[0]]}, + [[0.03841128, 0.05005587, 0.04218721, 0.0381362 , 0.02965146, 0.04520592, 0.03062659, 0.0624149 , 0.06744644, 0.02683695], + [0.14519169, 0.18920736, 0.15946443, 0.1441519 , 0.11208025, 0.17087491, 0.11576615, 0.23592355, 0.25494239, 0.10144161]], id="Buffer"), + pytest.param(Functions.DictionaryMemory, test_var, {'seed': module_seed}, + [[0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], + [0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]], + id="DictionaryMemory"), + pytest.param(Functions.DictionaryMemory, 
test_var, {'rate':RAND1, 'seed': module_seed}, + [[0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], + [0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192]], + id="DictionaryMemory Rate"), + pytest.param(Functions.DictionaryMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, + [[0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], + [0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192]], + id="DictionaryMemory Initializer"), + pytest.param(Functions.DictionaryMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.5, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], + id="DictionaryMemory Low Retrieval"), + pytest.param(Functions.DictionaryMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], + id="DictionaryMemory Low Storage"), + pytest.param(Functions.DictionaryMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, + [[0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], + [0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192]], + id="DictionaryMemory High Storage/Retrieve"), # Disable noise tests for now as they trigger failure in DictionaryMemory lookup # (Functions.DictionaryMemory, test_var, {'rate':RAND1, 'noise':RAND2}, [[ # 0.79172504, 0.52889492, 0.56804456, 0.92559664, 0.07103606, 0.0871293 , 0.0202184 , 0.83261985, 0.77815675, 0.87001215 ],[ @@ -53,45 +70,75 @@ # 0.79172504, 0.52889492, 0.56804456, 0.92559664, 0.07103606, 0.0871293 , 0.0202184 , 0.83261985, 0.77815675, 0.87001215 ],[ # 1.3230471933615413, 1.4894230558066361, 1.3769970655058605, 1.3191168724311135, 1.1978884887731214, 1.4201278025008728, 1.2118209006969092, 1.6660066902162964, 1.737896449935246, 1.1576752082599944 #]]), - (Functions.DictionaryMemory, test_var, {'rate':RAND1, 'seed': module_seed}, [[ - 0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], [ - 0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]]), - (Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.5, 'seed': module_seed}, - [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]]), - (Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed}, - [[ 0. for i in range(SIZE) ],[ 0. 
for i in range(SIZE) ]]), - (Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, [[ - 0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], [ - 0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]]), - (Functions.ContentAddressableMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, [[ - 0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], [ - 0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192 ]]), -] - -# use list, naming function produces ugly names -names = [ - "Buffer", -# "Buffer Initializer", - "DictionaryMemory", - "DictionaryMemory Random Retrieval", - "DictionaryMemory Random Storage", - "DictionaryMemory Random Retrieval-Storage", - "DictionaryMemory Initializer", -# "DictionaryMemory Noise", -# "DictionaryMemory Noise Random Retrieval", -# "DictionaryMemory Noise Random Storage", -# "DictionaryMemory Initializer Noise", - "ContentAddressableMemory", - "ContentAddressableMemory Random Retrieval", - "ContentAddressableMemory Random Storage", - "ContentAddressableMemory Random Retrieval-Storage", - "ContentAddressableMemory Initializer", + pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.5, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], + id="ContentAddressableMemory Low Retrieval"), + pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. 
for i in range(SIZE) ]], + id="ContentAddressableMemory Low Storage"), + pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, + [[0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], + [0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192]], + id="ContentAddressableMemory High Storage/Retrieval"), + pytest.param(Functions.ContentAddressableMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, + [[0.5488135039273248, 0.7151893663724195, 0.6027633760716439, 0.5448831829968969, 0.4236547993389047, 0.6458941130666561, 0.4375872112626925, 0.8917730007820798, 0.9636627605010293, 0.3834415188257777], + [0.7917250380826646, 0.5288949197529045, 0.5680445610939323, 0.925596638292661, 0.07103605819788694, 0.08712929970154071, 0.02021839744032572, 0.832619845547938, 0.7781567509498505, 0.8700121482468192]], + id="ContentAddressableMemory Initializer"), + pytest.param(Functions.DictionaryMemory, philox_var, {'seed': module_seed}, + [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], + [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], + id="DictionaryMemory (Philox)"), + pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'seed': module_seed}, + [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], + [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], + id="DictionaryMemory Rate (Philox)"), + pytest.param(Functions.DictionaryMemory, philox_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, + [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], + [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], + id="DictionaryMemory Initializer (Philox)"), + pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], + id="DictionaryMemory Low Retrieval (Philox)"), + pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. 
for i in range(SIZE) ]], + id="DictionaryMemory Low Storage (Philox)"), + pytest.param(Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, + [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], + [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], + id="DictionaryMemory High Storage/Retrieve (Philox)"), +# Disable noise tests for now as they trigger failure in DictionaryMemory lookup +# (Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'noise':RAND2}, [[ +# 0.79172504, 0.52889492, 0.56804456, 0.92559664, 0.07103606, 0.0871293 , 0.0202184 , 0.83261985, 0.77815675, 0.87001215 ],[ +# 1.3230471933615413, 1.4894230558066361, 1.3769970655058605, 1.3191168724311135, 1.1978884887731214, 1.4201278025008728, 1.2118209006969092, 1.6660066902162964, 1.737896449935246, 1.1576752082599944 +#]]), +# (Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'noise':[RAND2], 'retrieval_prob':0.5}, +# [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]]), +# (Functions.DictionaryMemory, philox_var, {'rate':RAND1, 'noise':RAND2, 'storage_prob':0.5}, +# [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]]), +# (Functions.DictionaryMemory, philox_var, {'initializer':test_initializer, 'rate':RAND1, 'noise':RAND2}, [[ +# 0.79172504, 0.52889492, 0.56804456, 0.92559664, 0.07103606, 0.0871293 , 0.0202184 , 0.83261985, 0.77815675, 0.87001215 ],[ +# 1.3230471933615413, 1.4894230558066361, 1.3769970655058605, 1.3191168724311135, 1.1978884887731214, 1.4201278025008728, 1.2118209006969092, 1.6660066902162964, 1.737896449935246, 1.1576752082599944 +#]]), + pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. for i in range(SIZE) ]], + id="ContentAddressableMemory Low Retrieval (Philox)"), + pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed}, + [[ 0. for i in range(SIZE) ],[ 0. 
for i in range(SIZE) ]], + id="ContentAddressableMemory Low Storage (Philox)"), + pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, + [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], + [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], + id="ContentAddressableMemory High Storage/Retrieval (Philox)"), + pytest.param(Functions.ContentAddressableMemory, philox_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, + [[0.45615033221654855, 0.5684339488686485, 0.018789800436355142, 0.6176354970758771, 0.6120957227224214, 0.6169339968747569, 0.9437480785146242, 0.6818202991034834, 0.359507900573786, 0.43703195379934145], + [0.6976311959272649, 0.06022547162926983, 0.6667667154456677, 0.6706378696181594, 0.2103825610738409, 0.1289262976548533, 0.31542835092418386, 0.3637107709426226, 0.5701967704178796, 0.43860151346232035]], + id="ContentAddressableMemory Initializer (Philox)"), ] @pytest.mark.function @pytest.mark.memory_function @pytest.mark.benchmark -@pytest.mark.parametrize("func, variable, params, expected", test_data, ids=names) +@pytest.mark.parametrize("func, variable, params, expected", test_data) def test_basic(func, variable, params, expected, benchmark, func_mode): if func is Functions.Buffer and func_mode != 'Python': pytest.skip("Not implemented") @@ -100,6 +147,8 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): benchmark.group = func.componentName f = func(default_variable=variable, **params) + if variable is philox_var: + f.parameters.random_state.set(_SeededPhilox([module_seed])) EX = pytest.helpers.get_func_execution(f, func_mode) EX(variable) diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 15a2c22f6f8..1dc450f9989 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -4,11 +4,17 @@ import psyneulink.core.components.functions.nonstateful.selectionfunctions as Functions import psyneulink.core.globals.keywords as kw import psyneulink.core.llvm as pnlvm +from psyneulink.core.globals.utilities import _SeededPhilox np.random.seed(0) SIZE=10 test_var = np.random.rand(SIZE) * 2.0 - 1.0 + +# the sum of probs should be 1.0 test_prob = np.random.rand(SIZE) +test_prob /= sum(test_prob) +test_philox = np.random.rand(SIZE) +test_philox /= sum(test_philox) test_data = [ (Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]), @@ -19,8 +25,10 @@ (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, [0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.]), (Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]), (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, [0., 0., 0., 1.,0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, [0.09762701, 0., 0., 0., 0., 0., 0., 0., 0., 0.]), - (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), + (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, [0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.]), + (Functions.OneHot, [test_var, test_prob], 
{'mode':kw.PROB_INDICATOR}, [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]), + (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, [0., 0.43037873274483895, 0., 0., 0., 0., 0., 0., 0., 0.]), + (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]), ] # use list, naming function produces ugly names @@ -35,6 +43,8 @@ "OneHot MIN_ABS_INDICATOR", "OneHot PROB", "OneHot PROB_INDICATOR", + "OneHot PROB PHILOX", + "OneHot PROB_INDICATOR PHILOX", ] GROUP_PREFIX="SelectionFunction " @@ -47,6 +57,9 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): benchmark.group = GROUP_PREFIX + func.componentName + params['mode'] f = func(default_variable=variable, **params) + if len(variable) == 2 and variable[1] is test_philox: + f.parameters.random_state.set(_SeededPhilox([0])) + EX = pytest.helpers.get_func_execution(f, func_mode) EX(variable) diff --git a/tests/functions/test_user_defined_func.py b/tests/functions/test_user_defined_func.py index aad14c76de7..a11907e944c 100644 --- a/tests/functions/test_user_defined_func.py +++ b/tests/functions/test_user_defined_func.py @@ -476,6 +476,7 @@ def myFunction(variable): @pytest.mark.parametrize("op,variable,expected", [ # parameter is string since compiled udf doesn't support closures as of present ("TANH", [[1, 3]], [0.76159416, 0.99505475]), ("EXP", [[1, 3]], [2.71828183, 20.08553692]), + ("SQRT", [[1, 3]], [1.0, 1.7320508075688772]), ("SHAPE", [1, 2], [2]), ("SHAPE", [[1, 3]], [1, 2]), ("ASTYPE_FLOAT", [1], [1.0]), @@ -504,6 +505,9 @@ def myFunction(variable): elif op == "EXP": def myFunction(variable): return np.exp(variable) + elif op == "SQRT": + def myFunction(variable): + return np.sqrt(variable) elif op == "SHAPE": def myFunction(variable): return variable.shape @@ -644,6 +648,60 @@ def myFunction(variable, x): assert U.function(0) == 0 +@pytest.mark.parametrize( + 'expression, parameters, result', + [ + ('x + y', {'x': 2, 'y': 4}, 6), + ('(x + y) * z', {'x': 2, 'y': 4, 'z': 2}, 12), + ('x + f(3)', {'x': 1, 'f': lambda x: x}, 4), + ('x + f (3)', {'x': 1, 'f': lambda x: x}, 4), + ('np.sum([int(x), 2])', {'x': 1, 'np': np}, 3), + ( + '(x * y) / 3 + f(z_0, z) + z0 - (x**y) * VAR', + {'x': 2, 'y': 3, 'f': lambda a, b: a + b, 'z_0': 1, 'z0': 1, 'z': 1, 'VAR': 1}, + -3 + ) + ] +) +@pytest.mark.parametrize('explicit_udf', [True, False]) +def test_expression_execution(expression, parameters, result, explicit_udf): + if explicit_udf: + u = UserDefinedFunction(custom_function=expression, **parameters) + else: + m = ProcessingMechanism(function=expression, **parameters) + u = m.function + + for p in parameters: + assert p in u.cust_fct_params + + assert u.execute() == result + + +def _function_test_integration(variable, x, y, z): + return x * y + z + + +@pytest.mark.parametrize( + 'function', + [ + (lambda variable, x, y, z: x * y + z), + 'x * y + z', + _function_test_integration + ] +) +def test_integration(function): + u = UserDefinedFunction( + custom_function=function, + x=2, + y=3, + z=5, + stateful_parameter='x' + ) + + assert u.execute() == 11 + assert u.execute() == 38 + + class TestUserDefFunc: def test_udf_creates_parameter_ports(self): def func(input=[[0], [0]], p=0, q=1): diff --git a/tests/json/model_with_control.py b/tests/json/model_with_control.py index b432363cc59..17240875340 100644 --- a/tests/json/model_with_control.py +++ b/tests/json/model_with_control.py @@ -48,7 +48,7 @@ controller=pnl.OptimizationControlMechanism( agent_rep=comp, 
state_features=[Input.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), objective_mechanism=pnl.ObjectiveMechanism( function=pnl.LinearCombination(operation=pnl.PRODUCT), monitor=[ diff --git a/tests/json/test_json.py b/tests/json/test_json.py index c7a159c1af6..09c9f700215 100644 --- a/tests/json/test_json.py +++ b/tests/json/test_json.py @@ -18,19 +18,19 @@ json_results_parametrization = [ - ('model_basic.py', 'comp', '{A: 1}'), - ('model_nested_comp_with_scheduler.py', 'comp', '{A: 1}'), - ( - 'model_with_control.py', - 'comp', - '{Input: [0.5, 0.123], reward: [20, 20]}' - ), + # ('model_basic.py', 'comp', '{A: 1}'), + # ('model_nested_comp_with_scheduler.py', 'comp', '{A: 1}'), + # ( + # 'model_with_control.py', + # 'comp', + # '{Input: [0.5, 0.123], reward: [20, 20]}' + # ), ( 'stroop_conflict_monitoring.py', 'Stroop_model', str(stroop_stimuli).replace("'", '') ), - ('model_backprop.py', 'comp', '{a: [1, 2, 3]}'), + # ('model_backprop.py', 'comp', '{a: [1, 2, 3]}'), ] diff --git a/tests/llvm/test_builtins_intrinsics.py b/tests/llvm/test_builtins_intrinsics.py index 52eafc104e5..37d0204656b 100644 --- a/tests/llvm/test_builtins_intrinsics.py +++ b/tests/llvm/test_builtins_intrinsics.py @@ -23,7 +23,7 @@ def test_builtin_op(benchmark, op, args, builtin, result, func_mode): f = pnlvm.LLVMBinaryFunction.get(builtin) elif func_mode == 'PTX': wrap_name = builtin + "_test_wrapper" - with pnlvm.LLVMBuilderContext.get_global() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: intrin = ctx.import_llvm_function(builtin) wrap_args = (*intrin.type.pointee.args, intrin.type.pointee.return_type.as_pointer()) diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index f54fc1efd4c..5660fbc862a 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -121,7 +121,7 @@ def ex(): def test_dot_llvm_constant_dim(benchmark, mode): custom_name = None - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: custom_name = ctx.get_unique_name("vxsqm") double_ptr_ty = ctx.float_ty.as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty)) @@ -180,7 +180,7 @@ def ex(): def test_dot_transposed_llvm_constant_dim(benchmark, mode): custom_name = None - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: custom_name = ctx.get_unique_name("vxsqm") double_ptr_ty = ctx.float_ty.as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty)) diff --git a/tests/llvm/test_builtins_random.py b/tests/llvm/test_builtins_mt_random.py similarity index 100% rename from tests/llvm/test_builtins_random.py rename to tests/llvm/test_builtins_mt_random.py diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py new file mode 100644 index 00000000000..1117fcc3605 --- /dev/null +++ b/tests/llvm/test_builtins_philox_random.py @@ -0,0 +1,252 @@ +import ctypes +import numpy as np +import pytest + +from psyneulink.core import llvm as pnlvm + +SEED = 0 + +@pytest.mark.benchmark(group="Philox integer PRNG") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('seed, expected', [ + (0, [259491006799949737, 4754966410622352325, 8698845897610382596, 
1686395276220330909, 18061843536446043542, 4723914225006068263]), + (-5, [4936860362606747269, 11611290354192475889, 2015254117581537576, 4620074701282684350, 9574602527017877750, 2811009141214824706]), + (15, [322160557315224026, 10187298772616605914, 11130303561932346278, 3540317624683947565, 245468466731153020, 17669502083357198575]), + (0xfeedcafe, [14360762734736817955, 5188080951818105836, 1417692977344505657, 15919241602363537044, 11006348070701344872, 12539562470140893435]), +]) +def test_random_int64(benchmark, mode, seed, expected): + res = [] + if mode == 'numpy': + state = np.random.Philox([np.uint64(seed)]) + prng = np.random.Generator(state) + def f(): + # Get uint range [0, MAX] to avoid any intermediate caching of random bits + return prng.integers(0xffffffffffffffff, dtype=np.uint64, endpoint=True) + + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state = init_fun.byref_arg_types[0]() + init_fun(state, seed) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64') + out = ctypes.c_longlong() + def f(): + gen_fun(state, out) + return np.uint64(out.value) + elif mode == 'PTX': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int64(seed)) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64') + out = np.asarray([0], dtype=np.uint64) + gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): + gen_fun.cuda_call(gpu_state, gpu_out) + return out[0] + + # Get >4 samples to force regeneration of Philox buffer + res = [f(), f(), f(), f(), f(), f()] + assert np.allclose(res, expected) + benchmark(f) + + +@pytest.mark.benchmark(group="Philox integer PRNG") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.param('PTX', marks=pytest.mark.cuda)]) +def test_random_int32(benchmark, mode): + res = [] + if mode == 'numpy': + state = np.random.Philox([SEED]) + prng = np.random.Generator(state) + def f(): + # Get uint range [0, MAX] to avoid any intermediate caching of random bits + return prng.integers(0xffffffff, dtype=np.uint32, endpoint=True) + + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state = init_fun.byref_arg_types[0]() + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32') + out = ctypes.c_int() + def f(): + gen_fun(state, out) + return np.uint32(out.value) + elif mode == 'PTX': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int64(SEED)) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32') + out = np.asarray([0], dtype=np.uint32) + gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): + gen_fun.cuda_call(gpu_state, gpu_out) + return out[0] + + # Get >4 samples to force regeneration of Philox buffer + res = [f(), f(), f(), f(), f(), f()] + assert np.allclose(res, [582496169, 60417458, 4027530181, 1107101889, 1659784452, 2025357889]) + benchmark(f) + + +@pytest.mark.benchmark(group="Philox floating point PRNG") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.param('PTX', 
marks=pytest.mark.cuda)]) +def test_random_double(benchmark, mode): + res = [] + if mode == 'numpy': + state = np.random.Philox([SEED]) + prng = np.random.Generator(state) + def f(): + return prng.random(dtype=np.float64) + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state = init_fun.byref_arg_types[0]() + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double') + out = ctypes.c_double() + def f(): + gen_fun(state, out) + return out.value + elif mode == 'PTX': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int64(SEED)) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double') + out = np.asfarray([0.0], dtype=np.float64) + gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): + gen_fun.cuda_call(gpu_state, gpu_out) + return out[0] + + res = [f(), f()] + assert np.allclose(res, [0.014067035665647709, 0.2577672456246177]) + benchmark(f) + + +@pytest.mark.benchmark(group="Philox floating point PRNG") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.param('PTX', marks=pytest.mark.cuda)]) +def test_random_float(benchmark, mode): + res = [] + if mode == 'numpy': + state = np.random.Philox([SEED]) + prng = np.random.Generator(state) + def f(): + return prng.random(dtype=np.float32) + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state = init_fun.byref_arg_types[0]() + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float') + out = ctypes.c_float() + def f(): + gen_fun(state, out) + return out.value + elif mode == 'PTX': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int64(SEED)) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float') + out = np.asfarray([0.0], dtype=np.float32) + gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): + gen_fun.cuda_call(gpu_state, gpu_out) + return out[0] + + res = [f(), f()] + assert np.allclose(res, [0.13562285900115967, 0.014066934585571289]) + benchmark(f) + + +@pytest.mark.benchmark(group="Philox Normal distribution") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()], + ids=lambda x: str(x)) +def test_random_normal(benchmark, mode, fp_type): + if mode != 'numpy': + # Instantiate builder context with the desired type + pnlvm.LLVMBuilderContext(fp_type) + + dtype = np.float64 if fp_type is pnlvm.ir.DoubleType() else np.float32 + if mode == 'numpy': + state = np.random.Philox([SEED]) + prng = np.random.Generator(state) + def f(): + return prng.standard_normal(dtype=dtype) + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state = init_fun.byref_arg_types[0]() + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal') + out = gen_fun.byref_arg_types[1]() + def f(): + gen_fun(state, out) + return out.value + elif mode == 'PTX': + 
init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init')
+ state_size = ctypes.sizeof(init_fun.byref_arg_types[0])
+ gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size)
+ init_fun.cuda_call(gpu_state, np.int64(SEED))
+
+ gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal')
+ out = np.asfarray([0.0], dtype=dtype)
+ gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out)
+ def f():
+ gen_fun.cuda_call(gpu_state, gpu_out)
+ return out[0]
+
+ res = [f() for i in range(191000)]
+ if fp_type is pnlvm.ir.DoubleType():
+ assert np.allclose(res[0:2], [-0.2059740286292238, -0.12884495093462758])
+ # 208 doesn't take the fast path but wraps around the main loop
+ assert np.allclose(res[207:211], [-0.768690647997579, 0.4301874289485477,
+ -0.7803640491708955, -1.146089287628737])
+ # 450 doesn't take the fast path or wrap around the main loop,
+ # but takes the special condition at the end of the loop
+ assert np.allclose(res[449:453], [-0.7713655663874537, -0.5638348710823825,
+ -0.9415838853097869, 0.6212784278881248])
+ # 2013 takes the rare secondary loop and exits in the first iteration,
+ # taking the positive value
+ assert np.allclose(res[2011:2015], [0.4201922976982861, 2.7021541445373916,
+ 3.7809967764329375, 0.19919094793393655])
+ # 5136 takes the rare secondary loop and exits in the first iteration,
+ # taking the negative value
+ assert np.allclose(res[5134:5138], [0.12317411414687844, -0.17846827974421134,
+ -3.6579887696059714, 0.2501530374224693])
+ # 190855 takes the rare secondary loop and needs more than one iteration
+ assert np.allclose(res[190853:190857], [-0.26418319904491194, 0.35889007879353746,
+ -3.843811523424439, -1.5256469840469997])
+ elif fp_type is pnlvm.ir.FloatType():
+ # The indices are taken from above and don't have special meaning.
+ assert np.allclose(res[0:2], [-0.24822916090488434, -0.02676701545715332]) + assert np.allclose(res[207:211], [-0.33086925745010376, -1.024695873260498, + -0.5162619352340698, -0.15033885836601257]) + assert np.allclose(res[449:453], [-0.2223609834909439, 0.16769859194755554, + -0.7806711196899414, 0.5867824554443359]) + assert np.allclose(res[2011:2015], [0.1979091316461563, -0.23467595875263214, + 1.1458240747451782, -1.0285860300064087]) + assert np.allclose(res[5134:5138], [-1.0523858070373535, -3.007537603378296, + -0.4331461489200592, -0.8841480612754822]) + assert np.allclose(res[190853:190857], [-0.8958197236061096, 0.10532315075397491, + 2.000257730484009, -1.129721999168396]) + assert not any(np.isnan(res)), list(np.isnan(res)).index(True) + benchmark(f) diff --git a/tests/llvm/test_custom_func.py b/tests/llvm/test_custom_func.py index 7bfbbc0b5e6..5435b9f3013 100644 --- a/tests/llvm/test_custom_func.py +++ b/tests/llvm/test_custom_func.py @@ -36,7 +36,7 @@ def test_fixed_dimensions__pnl_builtin_vxm(mode): custom_name = None - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: custom_name = ctx.get_unique_name("vxsqm") double_ptr_ty = ctx.convert_python_struct_to_llvm_ir(1.0).as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, double_ptr_ty, double_ptr_ty)) @@ -76,7 +76,7 @@ def test_fixed_dimensions__pnl_builtin_vxm(mode): ], ids=lambda x: str(x.dtype)) def test_integer_broadcast(mode, val): custom_name = None - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: custom_name = ctx.get_unique_name("broadcast") int_ty = ctx.convert_python_struct_to_llvm_ir(val) int_array_ty = ir.ArrayType(int_ty, 8) diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index 1b12f129982..7d41f9930fc 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -20,7 +20,7 @@ pytest.param('PTX', marks=pytest.mark.cuda)]) def test_helper_fclamp(mode): - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: double_ptr_ty = ir.DoubleType().as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, ctx.int32_ty, double_ptr_ty)) @@ -65,7 +65,7 @@ def test_helper_fclamp(mode): pytest.param('PTX', marks=pytest.mark.cuda)]) def test_helper_fclamp_const(mode): - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: double_ptr_ty = ir.DoubleType().as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, ctx.int32_ty)) @@ -104,7 +104,7 @@ def test_helper_fclamp_const(mode): pytest.param('PTX', marks=pytest.mark.cuda)]) def test_helper_is_close(mode): - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: double_ptr_ty = ir.DoubleType().as_pointer() func_ty = ir.FunctionType(ir.VoidType(), [double_ptr_ty, double_ptr_ty, double_ptr_ty, ctx.int32_ty]) @@ -158,7 +158,7 @@ def test_helper_is_close(mode): pytest.param('PTX', marks=pytest.mark.cuda)]) def test_helper_all_close(mode): - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: arr_ptr_ty = ir.ArrayType(ir.DoubleType(), DIM_X).as_pointer() func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty, ir.IntType(32).as_pointer()]) @@ -202,7 +202,7 @@ def test_helper_all_close(mode): @pytest.mark.skipif(sys.platform == 'win32', reason="Loading C library is complicated on windows") def test_helper_printf(capfd, ir_argtype, format_spec, 
values_to_check): format_str = f"Hello {(format_spec+' ')*len(values_to_check)} \n" - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: func_ty = ir.FunctionType(ir.VoidType(), []) ir_values_to_check = [ir_argtype(i) for i in values_to_check] custom_name = ctx.get_unique_name("test_printf") @@ -396,9 +396,13 @@ def test_helper_array_from_shape(self, ir_type, shape): (pnlvm.helpers.exp, 1.0, 2.718281828459045), (pnlvm.helpers.coth, 1.0, 1.3130352854993313), (pnlvm.helpers.csch, 1.0, 0.8509181282393215), + (pnlvm.helpers.log, 1.0, 0.0), + (pnlvm.helpers.log1p, 1.0, 0.6931471805599453), ]) -def test_helper_numerical(mode, op, var, expected): - with pnlvm.LLVMBuilderContext() as ctx: +@pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()], + ids=lambda x: str(x)) +def test_helper_numerical(mode, op, var, expected, fp_type): + with pnlvm.LLVMBuilderContext(fp_type) as ctx: func_ty = ir.FunctionType(ir.VoidType(), [ctx.float_ty.as_pointer()]) custom_name = ctx.get_unique_name("numerical") @@ -419,12 +423,10 @@ def test_helper_numerical(mode, op, var, expected): bin_f(ctypes.byref(res)) res = res.value else: - # FIXME: this needs to consider ctx.float_ty - res = np.array([var], dtype=np.float64) + res = np.ctypeslib.as_array(bin_f.byref_arg_types[0](var)) bin_f.cuda_wrap_call(res) - res = res[0] - assert res == expected + assert np.allclose(res, expected) @pytest.mark.llvm @pytest.mark.parametrize('mode', ['CPU', @@ -434,7 +436,7 @@ def test_helper_numerical(mode, op, var, expected): (np.array([[1,2],[3,4]], dtype=np.float64), np.array([[2,3],[4,5]], dtype=np.float64)), ], ids=["vector", "matrix"]) def test_helper_elementwise_op(mode, var, expected): - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: arr_ptr_ty = ctx.convert_python_struct_to_llvm_ir(var).as_pointer() func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty]) @@ -477,7 +479,7 @@ def test_helper_elementwise_op(mode, var, expected): [23.,25.,27.]])), ]) def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): - with pnlvm.LLVMBuilderContext() as ctx: + with pnlvm.LLVMBuilderContext.get_current() as ctx: arr_ptr_ty = ctx.convert_python_struct_to_llvm_ir(var1).as_pointer() func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty, arr_ptr_ty]) diff --git a/tests/log/test_log.py b/tests/log/test_log.py index 9cb42a1c37f..37e6a50bf06 100644 --- a/tests/log/test_log.py +++ b/tests/log/test_log.py @@ -1165,7 +1165,7 @@ def node_logged_in_simulation(self): controller=pnl.OptimizationControlMechanism( agent_rep=comp, state_features=[Input.input_port, reward.input_port], - state_feature_function=pnl.AdaptiveIntegrator(rate=0.5), + state_feature_functions=pnl.AdaptiveIntegrator(rate=0.5), objective_mechanism=pnl.ObjectiveMechanism( function=pnl.LinearCombination(operation=pnl.PRODUCT), monitor=[ diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index 81f24448cd4..ef9f2ac0643 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -14,6 +14,7 @@ from psyneulink.core.scheduling.condition import Never, WhenFinished from psyneulink.core.scheduling.time import TimeScale from psyneulink.core.globals.keywords import IDENTITY_MATRIX, FULL_CONNECTIVITY_MATRIX +from psyneulink.core.globals.utilities import _SeededPhilox from psyneulink.library.components.mechanisms.processing.integrator.ddm import \ ARRAY, DDM, DDMError, 
DECISION_VARIABLE_ARRAY, SELECTED_INPUT_ARRAY @@ -234,12 +235,15 @@ def test_selected_input_array(self): @pytest.mark.ddm_mechanism @pytest.mark.mechanism @pytest.mark.benchmark -def test_DDM_Integrator_Bogacz(benchmark, mech_mode): +@pytest.mark.parametrize('prng', ['Default', 'Philox']) +def test_DDM_Integrator_Bogacz(benchmark, mech_mode, prng): stim = 10 T = DDM( name='DDM', function=DriftDiffusionAnalytical() ) + if prng == 'Philox': + T.parameters.random_state.set(_SeededPhilox([0])) ex = pytest.helpers.get_mech_execution(T, mech_mode) val = ex(stim)[0] diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index 6c7042cfe04..b4d02760546 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -58,9 +58,9 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec if benchmark.enabled: benchmark(EX, variable) + # TEST WITH ContentAddressableMemory *********************************************************************************** # Note: ContentAddressableMemory has not yet been compiled for use with LLVM or PTX, so those are dummy tests for now - test_data = [ ( # name diff --git a/tests/mechanisms/test_input_state_spec.py b/tests/mechanisms/test_input_state_spec.py index 16b38dbe54a..241ded93279 100644 --- a/tests/mechanisms/test_input_state_spec.py +++ b/tests/mechanisms/test_input_state_spec.py @@ -132,7 +132,7 @@ def test_mismatch_dim_input_ports_with_default_variable_error(self): default_variable=[[0], [0]], input_ports=[[[32],[24]],'HELLO'] ) - assert 'Port value' in str(error_text.value) and 'does not match reference_value' in str(error_text.value) + assert 'The value' in str(error_text.value) and 'does not match the reference_value' in str(error_text.value) # ------------------------------------------------------------------------------------------------ # TEST 3 diff --git a/tests/mechanisms/test_mechanisms.py b/tests/mechanisms/test_mechanisms.py index b4f974df3f4..34569eb6ee9 100644 --- a/tests/mechanisms/test_mechanisms.py +++ b/tests/mechanisms/test_mechanisms.py @@ -50,8 +50,8 @@ def test_noise_variations(self, noise): t2 = pnl.TransferMechanism(name='t2', size=2) t2.integrator_function.parameters.noise.set(noise()) - t1.integrator_function.noise.base.random_state = np.random.RandomState([0]) - t2.integrator_function.noise.base.random_state = np.random.RandomState([0]) + t1.integrator_function.noise.seed = 0 + t2.integrator_function.noise.base.seed = 0 for _ in range(5): np.testing.assert_equal(t1.execute([1, 1]), t2.execute([1, 1])) diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index c627db0e3cf..ac528e7fdd0 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -301,18 +301,7 @@ def test_transfer_mech_exponential_noise(self): def test_transfer_mech_uniform_to_normal_noise(self): try: import scipy - T = TransferMechanism( - name='T', - default_variable=[0, 0, 0, 0], - function=Linear(), - noise=UniformToNormalDist(), - integration_rate=1.0 - ) - T.noise.base.parameters.random_state.get(None).seed(22) - val = T.execute([0, 0, 0, 0]) - assert np.allclose(val, [[-0.81177443, -0.04593492, -0.20051725, 1.07665147]]) - - except: + except ModuleNotFoundError: with pytest.raises(FunctionError) as error_text: T = TransferMechanism( name='T', @@ -322,6 +311,20 @@ def test_transfer_mech_uniform_to_normal_noise(self): integration_rate=1.0 ) assert "The 
UniformToNormalDist function requires the SciPy package." in str(error_text.value) + else: + T = TransferMechanism( + name='T', + default_variable=[0, 0, 0, 0], + function=Linear(), + noise=UniformToNormalDist(), + integration_rate=1.0 + ) + # This is equivalent to + # T.noise.base.parameters.random_state.get(None).seed([22]) + T.noise.parameters.seed.set(22, None) + val = T.execute([0, 0, 0, 0]) + assert np.allclose(val, [[1.73027452, -1.07866481, -1.98421126, 2.99564032]]) + @pytest.mark.mechanism diff --git a/tests/misc/test_notebooks.py b/tests/misc/test_notebooks.py index 30bb02d4924..f2d4f2ce566 100644 --- a/tests/misc/test_notebooks.py +++ b/tests/misc/test_notebooks.py @@ -43,6 +43,7 @@ def test_ipynb(filepath): else: del os.environ["PYTHONPATH"] + if __name__ == '__main__': for filepath in _find_ipynbs(): print('Running {}'.format(filepath)) diff --git a/tests/misc/test_parameters.py b/tests/misc/test_parameters.py index 4e86418ae63..7f9cf8828c8 100644 --- a/tests/misc/test_parameters.py +++ b/tests/misc/test_parameters.py @@ -335,6 +335,20 @@ def test_param_attrs_match(self, obj, parameter_name, attr_name): assert getattr(shared_param, attr_name) == getattr(source_param, attr_name) + orig_values = shared_param.stateful, source_param.stateful + + # change value of shared attribute on source parameter + source_param.stateful = not source_param.stateful + assert getattr(shared_param, attr_name) == getattr(source_param, attr_name) + + shared_param.stateful, source_param.stateful = orig_values + + # change value of shared attribute on sharedparameter + shared_param.stateful = not shared_param.stateful + assert getattr(shared_param, attr_name) == getattr(source_param, attr_name) + + shared_param.stateful, source_param.stateful = orig_values + @pytest.mark.parametrize( 'integrator_function, expected_rate', [ diff --git a/tests/models/test_greedy_agent.py b/tests/models/test_greedy_agent.py index 45146bf21c2..8a75534e408 100644 --- a/tests/models/test_greedy_agent.py +++ b/tests/models/test_greedy_agent.py @@ -13,6 +13,7 @@ from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal from psyneulink.core.compositions.composition import Composition, NodeRole from psyneulink.core.globals.keywords import VARIANCE, NORMED_L0_SIMILARITY +from psyneulink.core.globals.utilities import _SeededPhilox from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import ComparatorMechanism @@ -115,9 +116,10 @@ def test_simplified_greedy_agent_random(benchmark, comp_mode): @pytest.mark.parametrize("samples", [[0,10], pytest.param([0,3,6,10], marks=pytest.mark.stress), pytest.param([0,2,4,6,8,10], marks=pytest.mark.stress), - pytest.param([a / 10.0 for a in range(0, 101)]), + pytest.param([a / 10.0 for a in range(0, 101)], marks=pytest.mark.stress), ], ids=lambda x: len(x)) -def test_predator_prey(benchmark, mode, samples): +@pytest.mark.parametrize('prng', ['Default', 'Philox']) +def test_predator_prey(benchmark, mode, prng, samples): if len(samples) > 10 and mode not in {pnl.ExecutionMode.LLVM, pnl.ExecutionMode.LLVMExec, pnl.ExecutionMode.LLVMRun, @@ -133,77 +135,114 @@ def test_predator_prey(benchmark, mode, samples): benchmark.group = "Predator-Prey " + str(len(samples)) obs_len = 3 obs_coords = 2 - player_idx = 0 - player_obs_start_idx = player_idx * obs_len - player_value_idx = player_idx * obs_len + obs_coords - player_coord_slice = slice(player_obs_start_idx,player_value_idx) - predator_idx = 1 - predator_obs_start_idx = predator_idx * 
obs_len - predator_value_idx = predator_idx * obs_len + obs_coords - predator_coord_slice = slice(predator_obs_start_idx,predator_value_idx) - prey_idx = 2 - prey_obs_start_idx = prey_idx * obs_len - prey_value_idx = prey_idx * obs_len + obs_coords - prey_coord_slice = slice(prey_obs_start_idx,prey_value_idx) player_len = prey_len = predator_len = obs_coords + # Input Mechanisms + player_pos = ProcessingMechanism(size=player_len, name="PLAYER POS") + prey_pos = ProcessingMechanism(size=prey_len, name="PREY POS") + predator_pos = ProcessingMechanism(size=predator_len, name="PREDATOR POS") + # Perceptual Mechanisms player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") - # Action Mechanism - # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: - # note: unitization is done in main loop, to allow compilation of LinearCombination function) (TBI) - greedy_action_mech = ComparatorMechanism(name='ACTION',sample=player_obs,target=prey_obs) + + def action_fn(variable): + predator_pos = variable[0] + player_pos = variable[1] + prey_pos = variable[2] + + # Directions away from predator and towards prey + pred_2_player = player_pos - predator_pos + play_2_prey = prey_pos - player_pos + + # Distances to predator and prey + distance_predator = np.sqrt(pred_2_player[0] * pred_2_player[0] + pred_2_player[1] * pred_2_player[1]) + distance_prey = np.sqrt(play_2_prey[0] * play_2_prey[0] + play_2_prey[1] * play_2_prey[1]) + + # Normalized directions from predator and towards prey + pred_2_player_norm = pred_2_player / distance_predator + play_2_prey_norm = play_2_prey / distance_prey + + # Weighted directions from predator and towards prey + # weights are reversed so closer agent has greater impact on movement + pred_2_player_n = pred_2_player_norm * (distance_prey / (distance_predator + distance_prey)) + play_2_prey_n = play_2_prey_norm * (distance_predator / (distance_predator + distance_prey)) + + return pred_2_player_n + play_2_prey_n + + # note: unitization is done in main loop + greedy_action_mech = pnl.ProcessingMechanism(function=action_fn, input_ports=["predator", "player", "prey"], + default_variable=[[0,0],[0,0],[0,0]], name="ACTION") + + direct_move = ComparatorMechanism(name='DIRECT MOVE',sample=player_pos, target=prey_pos) # Create Composition agent_comp = Composition(name='PREDATOR-PREY COMPOSITION') - agent_comp.add_node(player_obs) - agent_comp.add_node(predator_obs) - agent_comp.add_node(prey_obs) + agent_comp.add_linear_processing_pathway([player_pos, player_obs]) + agent_comp.add_linear_processing_pathway([prey_pos, prey_obs]) + agent_comp.add_linear_processing_pathway([predator_pos, predator_obs]) agent_comp.add_node(greedy_action_mech) - agent_comp.exclude_node_roles(predator_obs, NodeRole.OUTPUT) + agent_comp.add_node(direct_move) + agent_comp.add_projection(pnl.MappingProjection(predator_obs, greedy_action_mech.input_ports[0])) + agent_comp.add_projection(pnl.MappingProjection(prey_obs, greedy_action_mech.input_ports[1])) + agent_comp.add_projection(pnl.MappingProjection(player_obs, greedy_action_mech.input_ports[2])) + agent_comp.exclude_node_roles(direct_move, NodeRole.OUTPUT) - ocm = OptimizationControlMechanism(state_features={SHADOW_INPUTS: [player_obs, predator_obs, prey_obs]}, + + ocm = 
OptimizationControlMechanism(state_features={SHADOW_INPUTS: [player_pos, predator_pos, prey_pos]}, agent_rep=agent_comp, function=GridSearch(direction=MINIMIZE, save_values=True), objective_mechanism=ObjectiveMechanism(function=Distance(metric=NORMED_L0_SIMILARITY), monitor=[ - player_obs, - prey_obs + greedy_action_mech, + direct_move ]), - control_signals=[ControlSignal(modulates=(VARIANCE,player_obs), - allocation_samples=samples), - ControlSignal(modulates=(VARIANCE,predator_obs), - allocation_samples=samples), - ControlSignal(modulates=(VARIANCE,prey_obs), - allocation_samples=samples) + control_signals=[ControlSignal(modulates=(VARIANCE, player_obs), + allocation_samples=samples, + cost_options=pnl.CostFunctions.INTENSITY), + ControlSignal(modulates=(VARIANCE, predator_obs), + allocation_samples=samples, + cost_options=pnl.CostFunctions.INTENSITY), + ControlSignal(modulates=(VARIANCE, prey_obs), + allocation_samples=samples, + cost_options=pnl.CostFunctions.INTENSITY), ], ) agent_comp.add_controller(ocm) agent_comp.enable_controller = True ocm.comp_execution_mode = ocm_mode - input_dict = {player_obs:[[1.1576537, 0.60782117]], - predator_obs:[[-0.03479106, -0.47666293]], - prey_obs:[[-0.60836214, 0.1760381 ]], + if prng == 'Philox': + player_obs.function.parameters.random_state.set(_SeededPhilox([0])) + prey_obs.function.parameters.random_state.set(_SeededPhilox([0])) + predator_obs.function.parameters.random_state.set(_SeededPhilox([0])) + ocm.function.parameters.random_state.set(_SeededPhilox([0])) + + input_dict = {player_pos:[[1.1576537, 0.60782117]], + predator_pos:[[-0.03479106, -0.47666293]], + prey_pos:[[-0.60836214, 0.1760381 ]], } run_results = agent_comp.run(inputs=input_dict, num_trials=2, execution_mode=mode) if len(samples) == 2: - # KDM 12/4/19: modified results due to global seed offset of - # GaussianDistort assignment. - # to produce old numbers, run get_global_seed once before creating - # each Mechanism with GaussianDistort above - assert np.allclose(run_results[0], [[-10.06333025, 2.4845505 ]]) + if prng == 'Default': + assert np.allclose(run_results[0], [[0.9705216285127504, -0.1343332460369043]]) + elif prng == 'Philox': + assert np.allclose(run_results[0], [[-0.16882940384606543, -0.07280074899749223]]) + else: + assert False, "Unknown PRNG!" 
+
 if mode is pnl.ExecutionMode.Python:
- assert np.allclose(ocm.state_feature_values, [[ 1.1576537, 0.60782117],
- [-0.03479106, -0.47666293],
- [-0.60836214, 0.1760381 ]])
+ # FIXME: The results are 'close' for both Philox and MT,
+ # because they're dominated by costs
+ assert np.allclose(np.asfarray(ocm.function.saved_values).flatten(),
+ [-2.66258741, -22027.9970321, -22028.17515945, -44053.59867802,
+ -22028.06045185, -44053.4048842, -44053.40736234, -66078.90687915])
 if benchmark.enabled:
 benchmark(agent_comp.run, inputs=input_dict, execution_mode=mode)
diff --git a/tests/ports/test_parameter_ports.py b/tests/ports/test_parameter_ports.py
index 6f08e1d1b39..f294cfd003c 100644
--- a/tests/ports/test_parameter_ports.py
+++ b/tests/ports/test_parameter_ports.py
@@ -178,6 +178,31 @@ class Parameters(TransferMechanism.Parameters):
 ):
 mech.parameter_ports['offset']
+ def test_duplicate_from_nested_class(self):
+ class NewFunc(pnl.SimpleIntegrator):
+ class Parameters(pnl.SimpleIntegrator.Parameters):
+ func_a = pnl.Parameter(pnl.UniformDist, modulable=True, stateful=False)
+
+ class NewMech(TransferMechanism):
+ class Parameters(TransferMechanism.Parameters):
+ offset = pnl.Parameter(0, modulable=True)
+ noise = pnl.Parameter(pnl.UniformDist, modulable=True)
+ func_b = pnl.Parameter(NewFunc, stateful=False)
+
+ mech = NewMech()
+
+ assert mech.parameter_ports['offset-self'].source is mech.parameters.offset
+ assert mech.parameter_ports['offset-integrator_function'].source is mech.integrator_function.parameters.offset
+
+ assert mech.parameter_ports['seed-func_b-func_a'].source is mech.func_b.func_a.parameters.seed
+ assert mech.parameter_ports['seed-noise'].source is mech.noise.parameters.seed
+
+ assert mech.parameter_ports['high-func_b-func_a'].source is mech.func_b.func_a.parameters.high
+ assert mech.parameter_ports['high-noise'].source is mech.noise.parameters.high
+
+ assert mech.parameter_ports['low-func_b-func_a'].source is mech.func_b.func_a.parameters.low
+ assert mech.parameter_ports['low-noise'].source is mech.noise.parameters.low
+
 def test_duplicate_sources(self, transfer_mech):
 assert transfer_mech.parameter_ports['offset-function'].source is transfer_mech.function.parameters.offset
 assert transfer_mech.parameter_ports['offset-integrator_function'].source is transfer_mech.integrator_function.parameters.offset
@@ -206,3 +231,17 @@ def test_alias_duplicate_base_access_fails(self):
 match='Did you want leak-function or rate'
 ):
 mech.parameter_ports['leak']
+
+ def test_subsubfunction_params_included_in_transfer_mech(self):
+ t = pnl.TransferMechanism(noise=pnl.UniformDist())
+
+ noise_func = t.integrator_function.noise
+ assert t.parameter_ports['seed'].source is noise_func.parameters.seed
+ assert t.parameter_ports['high'].source is noise_func.parameters.high
+ assert t.parameter_ports['low'].source is noise_func.parameters.low
+
+ def test_source_uninitialized_functions(self):
+ m = pnl.EpisodicMemoryMechanism()
+
+ assert m.parameter_ports['seed-function'].source is m.function.parameters.seed
+ assert m.parameter_ports['seed-function-selection_function'].source is m.function.selection_function.parameters.seed
diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt
index 4f354bda2eb..98ae285469c 100644
--- a/tutorial_requirements.txt
+++ b/tutorial_requirements.txt
@@ -1,3 +1,3 @@
-graphviz<0.18.0
+graphviz<0.20.0
 jupyter<=1.0.0
 matplotlib<3.4.4