diff --git a/pylearn2/expr/nnet.py b/pylearn2/expr/nnet.py
index 3d36cc71b6..c816e5b810 100644
--- a/pylearn2/expr/nnet.py
+++ b/pylearn2/expr/nnet.py
@@ -114,6 +114,46 @@ def arg_of_softmax(Y_hat):
     return z
 
 
+def arg_of_sigmoid(Y_hat):
+    """
+    Given the output of a call to theano.tensor.nnet.sigmoid,
+    returns the argument to the sigmoid (by tracing the Theano
+    graph).
+
+    Parameters
+    ----------
+    Y_hat : Variable
+        T.nnet.sigmoid(Z)
+
+    Returns
+    -------
+    Z : Variable
+        The variable that was passed to T.nnet.sigmoid to create `Y_hat`.
+        Raises an error if `Y_hat` is not actually the output of a theano
+        sigmoid.
+    """
+    assert hasattr(Y_hat, 'owner')
+    owner = Y_hat.owner
+    assert owner is not None
+    op = owner.op
+    if isinstance(op, Print):
+        assert len(owner.inputs) == 1
+        Y_hat, = owner.inputs
+        owner = Y_hat.owner
+        op = owner.op
+    success = False
+    if isinstance(op, T.Elemwise):
+        if isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid):
+            success = True
+    if not success:
+        raise TypeError("Expected Y_hat to be the output of a sigmoid, "
+                        "but it appears to be the output of " + str(op) +
+                        " of type " + str(type(op)))
+    z, = owner.inputs
+    assert z.ndim == 2
+    return z
+
+
 def kl(Y, Y_hat, batch_axis):
     """
     Warning: This function expects a sigmoid nonlinearity in the
@@ -323,4 +363,4 @@ def compute_f1(precision, recall):
     """
     f1 = (2. * precision * recall /
           T.maximum(1, precision + recall))
-    return f1
\ No newline at end of file
+    return f1
diff --git a/pylearn2/expr/tests/test_nnet.py b/pylearn2/expr/tests/test_nnet.py
index 6de0f1c3b7..2d8277770b 100644
--- a/pylearn2/expr/tests/test_nnet.py
+++ b/pylearn2/expr/tests/test_nnet.py
@@ -14,12 +14,13 @@
 from theano import tensor as T
 
 from pylearn2.models.mlp import MLP, Sigmoid
+from pylearn2.expr.nnet import arg_of_sigmoid
 from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
 from pylearn2.expr.nnet import softmax_numpy
 from pylearn2.expr.nnet import softmax_ratio
 from pylearn2.expr.nnet import compute_recall
 from pylearn2.expr.nnet import kl
-from pylearn2.expr.nnet import elemwise_kl 
+from pylearn2.expr.nnet import elemwise_kl
 from pylearn2.utils import sharedX
 
@@ -83,7 +84,7 @@ def test_kl():
     """
     init_mode = theano.config.compute_test_value
     theano.config.compute_test_value = 'raise'
-    
+
     try:
         mlp = MLP(layers=[Sigmoid(dim=10, layer_name='Y', irange=0.1)],
                   nvis=10)
@@ -101,7 +102,7 @@ def test_kl():
         np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
         Y.tag.test_value[2][3] = -0.1
         np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
-    
+
     finally:
         theano.config.compute_test_value = init_mode
 
@@ -112,10 +113,10 @@ def test_elemwise_kl():
     input.
     """
     init_mode = theano.config.compute_test_value
-    theano.config.compute_test_value = 'raise'    
-    
+    theano.config.compute_test_value = 'raise'
+
     try:
-        mlp = MLP(layers=[Sigmoid(dim=10, layer_name='Y', irange=0.1)], 
+        mlp = MLP(layers=[Sigmoid(dim=10, layer_name='Y', irange=0.1)],
                   nvis=10)
         X = mlp.get_input_space().make_theano_batch()
         Y = mlp.get_output_space().make_theano_batch()
@@ -131,8 +132,29 @@ def test_elemwise_kl():
         np.testing.assert_raises(ValueError, elemwise_kl, Y, Y_hat)
         Y.tag.test_value[2][3] = -0.1
         np.testing.assert_raises(ValueError, elemwise_kl, Y, Y_hat)
-    
+
     finally:
         theano.config.compute_test_value = init_mode
-
+
+def test_arg_of_sigmoid_good():
+    """
+    Tests that arg_of_sigmoid works when given a good input.
+    """
+
+    X = T.matrix()
+    Y = T.nnet.sigmoid(X)
+    Z = arg_of_sigmoid(Y)
+    assert X is Z
+
+def test_arg_of_sigmoid_bad():
+    """
+    Tests that arg_of_sigmoid raises an error when given a bad input.
+    """
+
+    X = T.matrix()
+    Y = T.nnet.softmax(X)
+    try:
+        Z = arg_of_sigmoid(Y)
+    except TypeError:
+        return
+    assert False # Should have failed