set_opts.m (forked from sjgershm/RL-models)
function [opts, param] = set_opts(opts)
% Fill in missing options and create parameter structure
%
% USAGE: [opts, param] = set_opts([opts])
%
% INPUTS:
% opts - options structure with any of the following fields:
% .go_bias - bias towards "go" action (default: false)
% .sticky - choice stickiness (default: false)
% .dual_learning_rate - different learning rates for pos and
% neg prediction errors (default: false)
% .lapse - lapse rate (default: false)
% .inverse_temp - inverse temperature (default: true)
% .pavlovian_bias - bias towards "go" action for states with
% high average reward (default: false)
% .sensitivity - reward/punishment sensitivity. 0 =
% sensitivity fixed at 1; 1 = common sensitivity for rewards
% and punishments; 2 = different sensitivity for rewards and
% punishments (default: 0)
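% .latents - return latent variables (default: false)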
%
% OUTPUTS:
% opts - options structure with missing fields added
% param - parameter structure
%
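% EXAMPLE (illustrative; the option settings below are arbitrary):
%   opts.go_bias = true; opts.sensitivity = 2;
%   [opts, param] = set_opts(opts);
%   {param.name}   % -> {'beta','lr_pos','b','rho_pos','rho_neg'}
%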
% Sam Gershman, Nov 2015

% default options
def_opts.go_bias = false;
def_opts.sticky = false;
def_opts.dual_learning_rate = false;
def_opts.lapse = false;
def_opts.inverse_temp = true;
def_opts.pavlovian_bias = false;
def_opts.sensitivity = 0;
def_opts.latents = false;
% fill in missing or empty fields
if nargin < 1 || isempty(opts)
opts = def_opts;
else
F = fieldnames(def_opts);
for f = 1:length(F)
if ~isfield(opts,F{f}) || isempty(opts.(F{f}))
opts.(F{f}) = def_opts.(F{f});
end
end
end
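
% Indicator vector set below: opts.ix(k) = 1 if the k-th parameter is
% included in the model. Index-to-parameter mapping (see the definitions
% further down; index 2, lr_pos, is always included):
%   1 = beta, 2 = lr_pos, 3 = lr_neg, 4 = choice stickiness,
%   5 = pi (Pavlovian bias), 6 = b (go bias), 7 = rho_pos, 8 = rho_neg,
%   9 = epsilon (lapse)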
opts.ix = ones(1,9);
if ~opts.go_bias; opts.ix(6) = 0; end
if ~opts.sticky; opts.ix(4) = 0; end
if ~opts.dual_learning_rate; opts.ix(3) = 0; end
if ~opts.lapse; opts.ix(9) = 0; end
if ~opts.inverse_temp; opts.ix(1) = 0; end
if ~opts.pavlovian_bias; opts.ix(5) = 0; end
if opts.sensitivity == 0; opts.ix(7:8) = 0; end
if opts.sensitivity == 1; opts.ix(8) = 0; end
%---------- create parameter structure ---------------%
param(1).name = 'beta';
param(1).hp = [3 2]; % hyperparameters of the gamma prior
param(1).logpdf = @(x) sum(log(gampdf(x,param(1).hp(1),param(1).hp(2)))); % log density function for prior
param(1).lb = 1e-8; % lower bound
param(1).ub = 50; % upper bound
param(1).fit = @(x) gamfit(x); % refit gamma hyperparameters from a sample of parameter values
param(2).name = 'lr_pos';
param(2).hp = [1.2 1.2]; % hyperparameters of beta prior
param(2).logpdf = @(x) sum(log(betapdf(x,param(2).hp(1),param(2).hp(2))));
param(2).lb = 0;
param(2).ub = 1;
param(2).fit = @(x) betafit(x); % refit beta hyperparameters from a sample of parameter values
param(3) = param(2); % learning rate for negative prediction errors: same beta prior as lr_pos
param(3).name = 'lr_neg';
param(4).name = 'choice stickiness';
param(4).hp = [0 10]; % hyperparameters of the normal prior
param(4).logpdf = @(x) sum(log(normpdf(x,param(4).hp(1),param(4).hp(2)))); % log density function for prior
param(4).lb = -5; % lower bound
param(4).ub = 5; % upper bound
param(4).fit = @(x) [mean(x) std(x)]; % moment-matching refit of the normal prior
param(5) = param(4); % Pavlovian bias: inherits the normal-prior settings
param(5).name = 'pi';
param(5).logpdf = @(x) 0; % flat prior (log-density contributes 0)
param(5).lb = 0; % lower bound
param(5).ub = 10; % upper bound
param(6) = param(4); % go bias
param(6).name = 'b';
param(6).logpdf = @(x) 0; % flat prior (log-density contributes 0)
param(7) = param(1); % reward sensitivity: same gamma prior as beta
param(7).name = 'rho_pos';
param(8) = param(7); % punishment sensitivity
param(8).name = 'rho_neg';
param(9).name = 'epsilon';
param(9).hp = [1.2 4]; % hyperparameters of beta prior
param(9).logpdf = @(x) sum(log(betapdf(x,param(9).hp(1),param(9).hp(2))));
param(9).lb = 0;
param(9).ub = 0.99;
param(9).fit = @(x) betafit(x);

% keep only the parameters switched on in opts.ix
param = param(opts.ix==1);
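
% Illustrative check of the returned structures (a sketch; the option values
% and queries below are assumptions, not part of this function):
%   [opts, param] = set_opts(struct('lapse', true));
%   {param.name}         % -> {'beta','lr_pos','epsilon'}
%   param(1).logpdf(2.5) % log Gamma(3,2) prior density evaluated at beta = 2.5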