Merge pull request #2030 from PrincetonUniversity/devel
Devel
kmantel authored Apr 28, 2021
2 parents 4c60958 + 8425ba5 commit b7816cb
Showing 17 changed files with 911 additions and 231 deletions.
64 changes: 64 additions & 0 deletions .github/workflows/compare-comment.yml
@@ -0,0 +1,64 @@
name: "Add doc diff to PR comment"

on:
workflow_run:
workflows: ["PsyNeuLink Docs Compare"]
types:
- completed

jobs:
post-comment:
runs-on: ubuntu-latest
if: >
${{ github.event.workflow_run.event == 'pull_request' &&
github.event.workflow_run.conclusion == 'success' }}
steps:

- name: 'Download artifact'
uses: actions/[email protected]
with:
script: |
var artifacts = await github.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{github.event.workflow_run.id }},
});
var matchArtifact = artifacts.data.artifacts.filter((artifact) => {
return artifact.name.startsWith("compare")
}).slice(-1)[0];
var download = await github.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
var fs = require('fs');
fs.writeFileSync('${{github.workspace}}/pr.zip', Buffer.from(download.data));
- name: Unzip artifact
run: unzip pr.zip

- name: Post comment with docs diff
uses: actions/[email protected]
with:
script: |
var fs = require("fs");
var issue_number = Number(fs.readFileSync('./PR_NR'));
var os = fs.readFileSync('./PR_OS', 'utf8').trim();
var python = fs.readFileSync('./PR_PYTHON_VERSION', 'utf8').trim();
var text = fs.readFileSync("./result.diff").slice(0,16384);
// basic input checks
console.assert(['ubuntu-latest', 'windows-latest', 'macos-latest'].includes(os), 'Unexpected os: %s', os);
console.assert(['3.6', '3.7', '3.8'].includes(python), 'Unexpected python: %s', python);
console.assert(!text.includes('```'), 'Invalid diff!');
console.log('Posting diff to PR:' + issue_number)
github.issues.createComment({
issue_number: issue_number,
owner: context.repo.owner,
repo: context.repo.repo,
body: 'This PR causes the following changes to the html docs (' + os + ', python-' + python + '):\n```\n' + text + '\n...\n```\nSee CI logs for the full diff.'
})
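
The posting step truncates the diff to 16 KiB and refuses to post a diff containing a triple-backtick fence, since an embedded fence would close the comment's code block early. A minimal Python sketch of that guard logic (the function name and constant are illustrative, not part of the workflow):

    MAX_LEN = 16384  # same slice length the github-script step applies

    def prepare_comment_body(diff_text, os_name, python_version):
        """Mirror the checks made by the 'Post comment with docs diff' step."""
        text = diff_text[:MAX_LEN]
        if '```' in text:
            # a stray fence would break out of the comment's code block
            raise ValueError('Invalid diff!')
        return ('This PR causes the following changes to the html docs '
                f'({os_name}, python-{python_version}):\n```\n{text}\n...\n```\n'
                'See CI logs for the full diff.')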
7 changes: 2 additions & 5 deletions .github/workflows/pnl-ci.yml
@@ -2,6 +2,8 @@ name: PsyNeuLink CI

 on:
   push:
+    branches-ignore:
+      - 'dependabot/**'
     paths-ignore:
       - 'docs/**'
       - 'doc_requirements.txt'
@@ -19,11 +21,6 @@ jobs:
         python-version: [3.6, 3.7, 3.8]
         python-architecture: ['x64']
         os: [ubuntu-latest, macos-latest, windows-latest]
-        include:
-          # add 32-bit build on windows
-          - python-version: 3.8
-            python-architecture: 'x86'
-            os: windows-latest

     steps:
       - name: Checkout sources
@@ -87,30 +87,18 @@ jobs:
       - name: Compare
         shell: bash
         run: |
+          mkdir -p compare
           # Store the resulting diff, or 'No differences!' to and output file
-          # The 'or true' part is needed to workaourd 'pipefail' flag used by github-actions
-          (diff -r docs-base docs-merge && echo 'No differences!' || true) | tee result.diff
-      - name: Post comment
-        uses: actions/github-script@v3
-        # Post comment only if not PR across repos
-        # if: ${{ github.event.base.full_name }} == ${{ github.event.head.repo.full_name }}
+          # The 'or true' part is needed to workaround 'pipefail' flag used by github-actions
+          (diff -r docs-base docs-merge && echo 'No differences!' || true) | tee ./compare/result.diff
+          # save PR number
+          echo ${{ github.event.number }} > ./compare/PR_NR
+          echo ${{ matrix.os }} > ./compare/PR_OS
+          echo ${{ matrix.python-version }} > ./compare/PR_PYTHON_VERSION
+      - name: Upload comparison results
+        uses: actions/upload-artifact@v2
         with:
-          script: |
-            // Post comment only if not PR across repos
-            console.log(context.payload.pull_request.base.repo.full_name)
-            console.log(context.payload.pull_request.head.repo.full_name)
-            var base_repo_name = context.payload.pull_request.base.repo.full_name
-            var head_repo_name = context.payload.pull_request.head.repo.full_name
-            if (base_repo_name != head_repo_name) return ;
-            var fs = require("fs");
-            var text = fs.readFileSync("./result.diff").slice(0,16384);
-            github.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              body: 'This PR causes the following changes to the html docs (${{ matrix.os }}, python-${{ matrix.python-version }}):\n```\n' + text + '\n...\n```\nSee CI logs for the full diff.'
-            })
+          name: compare-${{ matrix.os }}-${{ matrix.python-version }}
+          path: compare
+          retention-days: 1
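
This change replaces direct comment posting with an artifact handoff: the docs-compare job, whose token is read-only for pull requests from forks, writes the diff and PR metadata into compare/ and uploads it, and the privileged workflow_run job in compare-comment.yml (added above) downloads the artifact and posts the comment. A small Python sketch of reading the artifact contents back (the helper function is hypothetical; the file names and 16 KiB slice come from the workflows):

    import pathlib

    def read_compare_artifact(root='compare'):
        """Read the files written by the 'Compare' step above."""
        base = pathlib.Path(root)
        return {
            'pr_number': int(base.joinpath('PR_NR').read_text()),
            'os': base.joinpath('PR_OS').read_text().strip(),
            'python': base.joinpath('PR_PYTHON_VERSION').read_text().strip(),
            'diff': base.joinpath('result.diff').read_bytes()[:16384],
        }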
7 changes: 5 additions & 2 deletions Scripts/Models (Under Development)/N-back.py
@@ -3,14 +3,17 @@

 # TODO:
 # Nback::
 # - separate out stim/context external inputs from those from EM into FFN
-# - figure out how to specify feedback from DDM to EM
+# - figure out how to specify feedback from DDM to EM:
 # - figure out how to execute EM twice:
 #   > first, at beginning of trial, to retrieve item based on current stimulus & context
 #     (with prob retrieval = 1, prob storage = 0)
 #   > second time, at end of trial (under influence of ControlMechanism) to encode current stimulus & context
 #     (with prob storage = 1; prob of retrieval = 0)
+#   scheduler.add_condition(A, pnl.AfterNCalls(CM, 1))
+#   scheduler.add_condition(CM, pnl.Always())
+#   composition.run(...termination_conds={pnl.TimeScale.TRIAL: pnl.And(pnl.AfterNCalls(CM, 2), pnl.JustRan(CM))})
 # - implement circular drift as function for an input mechanism
-# - BUG: should be able to use InputPort as spec for a pathway (if there is nothing after it);
+# - ADD PNL FEATURE: should be able to use InputPort as spec for a pathway (if there is nothing after it);
 #   same for OutputPort (if there is nothing before it)
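
The two-pass execution sketched in the scheduler lines above can be expressed with PsyNeuLink's condition classes. A minimal, hedged sketch adapted from that pseudocode (mechanism names are placeholders, and plain ProcessingMechanisms stand in for the EM and control mechanisms described in the TODO):

    import psyneulink as pnl

    # placeholders for the EM and control mechanisms in the TODO
    em = pnl.ProcessingMechanism(name='EM')
    cm = pnl.ProcessingMechanism(name='CM')
    comp = pnl.Composition()
    comp.add_linear_processing_pathway([em, cm])

    # let EM run at the start of the trial, and again once CM has run
    comp.scheduler.add_condition(em, pnl.Any(pnl.AtTrialStart(), pnl.AfterNCalls(cm, 1)))

    # end the trial after CM's second call, as in the TODO's termination condition
    comp.run(inputs={em: [[0]]},
             termination_processing={pnl.TimeScale.TRIAL:
                                     pnl.And(pnl.AfterNCalls(cm, 2), pnl.JustRan(cm))})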


2 changes: 1 addition & 1 deletion docs/source/EpisodicMemoryMechanism.rst
@@ -4,4 +4,4 @@ EpisodicMemoryMechanism
 .. automodule:: psyneulink.library.components.mechanisms.processing.integrator.episodicmemorymechanism
    :members:
    :private-members:
-   :exclude-members: random, Parameters, _instantiate_input_ports, _instantiate_output_ports, _parse_function_variable, memory
+   :exclude-members: random, Parameters
78 changes: 50 additions & 28 deletions psyneulink/core/components/functions/learningfunctions.py
@@ -22,23 +22,23 @@
 """

+import types
 from collections import namedtuple

 import numpy as np
 import typecheck as tc
-import types

+from psyneulink.core.components.component import ComponentError
 from psyneulink.core.components.functions.function import Function_Base, FunctionError, is_function_type
-from psyneulink.core.components.functions.transferfunctions import Logistic
+from psyneulink.core.components.functions.transferfunctions import Logistic, SoftMax
+from psyneulink.core.globals.context import handle_external_context
 from psyneulink.core.globals.keywords import \
     CONTRASTIVE_HEBBIAN_FUNCTION, TDLEARNING_FUNCTION, LEARNING_FUNCTION_TYPE, LEARNING_RATE, \
     KOHONEN_FUNCTION, GAUSSIAN, LINEAR, EXPONENTIAL, HEBBIAN_FUNCTION, RL_FUNCTION, BACKPROPAGATION_FUNCTION, MATRIX, \
     MSE, SSE
 from psyneulink.core.globals.parameters import Parameter
-from psyneulink.core.globals.context import ContextFlags, handle_external_context
-from psyneulink.core.globals.utilities import is_numeric, scalar_distance, get_global_seed, convert_to_np_array
 from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
+from psyneulink.core.globals.utilities import is_numeric, scalar_distance, get_global_seed, convert_to_np_array

 __all__ = ['LearningFunction', 'Kohonen', 'Hebbian', 'ContrastiveHebbian',
            'Reinforcement', 'BayesGLM', 'BackPropagation', 'TDLearning',
@@ -132,8 +132,12 @@ class Parameters(Function_Base.Parameters):
                     :default value: 0.05
                     :type: ``float``
         """
-        variable = Parameter(np.array([0, 0, 0]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
-        learning_rate = Parameter(0.05, modulable=True)
+        variable = Parameter(np.array([0, 0, 0]),
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')
+        learning_rate = Parameter(0.05,
+                                  modulable=True)

     def _validate_learning_rate(self, learning_rate, type=None):

@@ -412,8 +416,15 @@ class Parameters(LearningFunction.Parameters):
                     :type: ``int``
         """
         random_state = Parameter(None, stateful=True, loggable=False)
-        variable = Parameter([np.array([0, 0, 0]), np.array([0])], read_only=True, pnl_internal=True, constructor_argument='default_variable')
-        value = Parameter(np.array([0]), read_only=True, aliases=['sample_weights'], pnl_internal=True)
+        variable = Parameter([np.array([0, 0, 0]),
+                              np.array([0])],
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')
+        value = Parameter(np.array([0]),
+                          read_only=True,
+                          aliases=['sample_weights'],
+                          pnl_internal=True)

     Lambda_0 = 0
     Lambda_prior = 0
@@ -747,7 +758,10 @@ class Parameters(LearningFunction.Parameters):
                     :default value: `GAUSSIAN`
                     :type: ``str``
         """
-        variable = Parameter([[0, 0], [0, 0], np.array([[0, 0], [0, 0]])], read_only=True, pnl_internal=True, constructor_argument='default_variable')
+        variable = Parameter([[0, 0], [0, 0], np.array([[0, 0], [0, 0]])],
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')
         distance_function = Parameter(GAUSSIAN, stateful=False)

         def _validate_distance_function(self, distance_function):
@@ -1024,8 +1038,12 @@ class Parameters(LearningFunction.Parameters):
                     :default value: 0.05
                     :type: ``float``
         """
-        variable = Parameter(np.array([0, 0]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
-        learning_rate = Parameter(0.05, modulable=True)
+        variable = Parameter(np.array([0, 0]),
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')
+        learning_rate = Parameter(0.05,
+                                  modulable=True)
     default_learning_rate = 0.05

     def __init__(self,
@@ -1254,7 +1272,10 @@ class Parameters(LearningFunction.Parameters):
                     :type: ``numpy.ndarray``
                     :read only: True
         """
-        variable = Parameter(np.array([0, 0]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
+        variable = Parameter(np.array([0, 0]),
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')

     default_learning_rate = 0.05

@@ -1541,7 +1562,10 @@ class Parameters(LearningFunction.Parameters):
                     :type: ``list``
                     :read only: True
         """
-        variable = Parameter(np.array([[0], [0], [0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
+        variable = Parameter(np.array([[0], [0], [0]]),
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')
         activation_input = Parameter([0], read_only=True, getter=_activation_input_getter)
         activation_output = Parameter([0], read_only=True, getter=_activation_output_getter)
         error_signal = Parameter([0], read_only=True, getter=_error_signal_getter)
@@ -1580,15 +1604,13 @@ def _validate_variable(self, variable, context=None):
                                  "single element for {}".
                                  format(self.name, variable[LEARNING_ERROR_OUTPUT]))

-        # Allow initialization with zero but not during a run (i.e., when called from check_args())
-        if not self.is_initializing:
-            if np.count_nonzero(variable[LEARNING_ACTIVATION_OUTPUT]) != 1:
-                raise ComponentError(
-                    "Second item ({}) of variable for {} must be an array with only one non-zero value "
-                    "(if output Mechanism being trained uses softmax,"
-                    " its \'output\' arg may need to be set to to PROB)".
-                    format(variable[LEARNING_ACTIVATION_OUTPUT], self.componentName))
-
+        # Must have only one (or no) non-zero entries in LEARNING_ACTIVATION_OUTPUT
+        if np.count_nonzero(variable[LEARNING_ACTIVATION_OUTPUT]) > 1:
+            owner_str = f" of {self.owner.name}" if self.owner else ""
+            raise ComponentError(f"Second item ({variable[LEARNING_ACTIVATION_OUTPUT]}) of variable for "
+                                 f"{self.componentName}{owner_str} must be an array with no more than one non-zero "
+                                 f"value; if output Mechanism being trained uses {SoftMax.componentName},"
+                                 f" that function's \'output\' arg may need to be set to to 'PROB').")
         return variable

     def _validate_params(self, request_set, target_set=None, context=None):
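
The rewritten check relaxes the old rule: previously, outside of initialization, the second item of the variable had to contain exactly one non-zero value; now an all-zero vector is also accepted, and only vectors with two or more non-zero entries are rejected. A quick numpy illustration of the new predicate (variable names here are illustrative):

    import numpy as np

    one_hot = np.array([0., 1., 0.])
    all_zero = np.array([0., 0., 0.])
    dense = np.array([0.2, 0.8, 0.])

    assert np.count_nonzero(one_hot) <= 1   # accepted
    assert np.count_nonzero(all_zero) <= 1  # accepted now; the old '!= 1' check rejected it
    assert np.count_nonzero(dense) > 1      # still raises ComponentError in _validate_variable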
@@ -1890,24 +1912,24 @@ class Parameters(LearningFunction.Parameters):
                     :type:
                     :read only: True
         """
-        variable = Parameter(np.array([[0], [0], [0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
+        variable = Parameter(np.array([[0], [0], [0]]),
+                             read_only=True,
+                             pnl_internal=True,
+                             constructor_argument='default_variable')
         learning_rate = Parameter(1.0, modulable=True)
         loss_function = Parameter(None, read_only=True)

         activation_input = Parameter([0], read_only=True, getter=_activation_input_getter)
         activation_output = Parameter([0], read_only=True, getter=_activation_output_getter)
         error_signal = Parameter([0], read_only=True, getter=_error_signal_getter)

         error_matrix = Parameter(None, read_only=True)

         activation_derivative_fct = Parameter(Logistic.derivative, stateful=False, loggable=False)

     default_learning_rate = 1.0

     @tc.typecheck
     def __init__(self,
                  default_variable=None,
-                 activation_derivative_fct: tc.optional(tc.optional(tc.any(types.FunctionType, types.MethodType))) = None,
+                 activation_derivative_fct: tc.optional(tc.optional(tc.any(types.FunctionType, types.MethodType)))=None,
                  # learning_rate: tc.optional(tc.optional(parameter_spec)) = None,
                  learning_rate=None,
                  loss_function=None,