Description:
In the `VAE_Standard_MLP_decoder` class, the `__init__` method assigns attributes from a `params` dictionary, but the method does not take `params` as an argument; its signature instead expects multiple independent parameters. This leads to a `NameError` (or a `TypeError`, depending on how the constructor is called) when executing the code.
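A minimal sketch of how the failure surfaces, assuming the class can be imported from `EVE/VAE_decoder.py` (i.e. the repository root is on `PYTHONPATH`); the argument values below are illustrative only:

```python
from EVE.VAE_decoder import VAE_Standard_MLP_decoder

# Calling with individual arguments, as the signature requires: the body then
# references the undefined name `params` and fails.
decoder = VAE_Standard_MLP_decoder(
    seq_len=100, alphabet_size=20, hidden_layers_sizes=[2000, 1000], z_dim=50,
    first_hidden_nonlinearity='relu', last_hidden_nonlinearity='relu',
    dropout_proba=0.0, convolve_output=True, convolution_depth=40,
    include_temperature_scaler=True, include_sparsity=False, num_tiles_sparsity=0)
# NameError: name 'params' is not defined

# Calling with a single dict instead, as the body expects, fails even earlier:
# VAE_Standard_MLP_decoder({...})
# TypeError: __init__() missing 11 required positional arguments
```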
Bug Location:
Starting from https://github.com/OATML/EVE/blob/master/EVE/VAE_decoder.py#L173:

```python
def __init__(self, seq_len, alphabet_size, hidden_layers_sizes, z_dim,
             first_hidden_nonlinearity, last_hidden_nonlinearity, dropout_proba,
             convolve_output, convolution_depth, include_temperature_scaler,
             include_sparsity, num_tiles_sparsity):
    """
    Required input parameters:
    - seq_len: (Int) Sequence length of sequence alignment
    - alphabet_size: (Int) Alphabet size of sequence alignment (will be driven by the data helper object)
    - hidden_layers_sizes: (List) List of the sizes of the hidden layers (all DNNs)
    - z_dim: (Int) Dimension of latent space
    - first_hidden_nonlinearity: (Str) Type of non-linear activation applied on the first (set of) hidden layer(s)
    - last_hidden_nonlinearity: (Str) Type of non-linear activation applied on the very last hidden layer (pre-sparsity)
    - dropout_proba: (Float) Dropout probability applied on all hidden layers. If 0.0 then no dropout applied
    - convolve_output: (Bool) Whether to perform 1d convolution on output (kernel size 1, stride 1)
    - convolution_depth: (Int) Size of the 1D-convolution on output
    - include_temperature_scaler: (Bool) Whether we apply the global temperature scaler
    - include_sparsity: (Bool) Whether we use the sparsity inducing scheme on the output from the last hidden layer
    - num_tiles_sparsity: (Int) Number of tiles to use in the sparsity inducing scheme (the more the tiles, the stronger the sparsity)
    - bayesian_decoder: (Bool) Whether the decoder is bayesian or not
    """
    super().__init__()
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # `params` is referenced throughout the body but is not an argument of
    # this method -- this mismatch is the bug being reported.
    self.seq_len = params['seq_len']
    self.alphabet_size = params['alphabet_size']
    self.hidden_layers_sizes = params['hidden_layers_sizes']
    self.z_dim = params['z_dim']
    self.bayesian_decoder = False
    self.dropout_proba = params['dropout_proba']
    self.convolve_output = params['convolve_output']
    self.convolution_depth = params['convolution_depth']
    self.include_temperature_scaler = params['include_temperature_scaler']
    self.include_sparsity = params['include_sparsity']
    self.num_tiles_sparsity = params['num_tiles_sparsity']
    self.mu_bias_init = 0.1

    self.hidden_layers = nn.ModuleDict()
    for layer_index in range(len(self.hidden_layers_sizes)):
        if layer_index == 0:
            self.hidden_layers[str(layer_index)] = nn.Linear(self.z_dim, self.hidden_layers_sizes[layer_index])
            nn.init.constant_(self.hidden_layers[str(layer_index)].bias, self.mu_bias_init)
        else:
            self.hidden_layers[str(layer_index)] = nn.Linear(self.hidden_layers_sizes[layer_index - 1], self.hidden_layers_sizes[layer_index])
            nn.init.constant_(self.hidden_layers[str(layer_index)].bias, self.mu_bias_init)

    if params['first_hidden_nonlinearity'] == 'relu':
        self.first_hidden_nonlinearity = nn.ReLU()
    elif params['first_hidden_nonlinearity'] == 'tanh':
        self.first_hidden_nonlinearity = nn.Tanh()
    elif params['first_hidden_nonlinearity'] == 'sigmoid':
        self.first_hidden_nonlinearity = nn.Sigmoid()
    elif params['first_hidden_nonlinearity'] == 'elu':
        self.first_hidden_nonlinearity = nn.ELU()
    elif params['first_hidden_nonlinearity'] == 'linear':
        self.first_hidden_nonlinearity = nn.Identity()

    if params['last_hidden_nonlinearity'] == 'relu':
        self.last_hidden_nonlinearity = nn.ReLU()
    elif params['last_hidden_nonlinearity'] == 'tanh':
        self.last_hidden_nonlinearity = nn.Tanh()
    elif params['last_hidden_nonlinearity'] == 'sigmoid':
        self.last_hidden_nonlinearity = nn.Sigmoid()
    elif params['last_hidden_nonlinearity'] == 'elu':
        self.last_hidden_nonlinearity = nn.ELU()
    elif params['last_hidden_nonlinearity'] == 'linear':
        self.last_hidden_nonlinearity = nn.Identity()

    if self.dropout_proba > 0.0:
        self.dropout_layer = nn.Dropout(p=self.dropout_proba)

    if self.convolve_output:
        self.output_convolution = nn.Conv1d(in_channels=self.convolution_depth, out_channels=self.alphabet_size, kernel_size=1, stride=1, bias=False)
        self.channel_size = self.convolution_depth
    else:
        self.channel_size = self.alphabet_size

    if self.include_sparsity:
        self.sparsity_weight = nn.Parameter(torch.randn(int(self.hidden_layers_sizes[-1] / self.num_tiles_sparsity), self.seq_len))

    self.W_out = nn.Parameter(torch.zeros(self.channel_size * self.seq_len, self.hidden_layers_sizes[-1]))
    nn.init.xavier_normal_(self.W_out)  # Initialize weights with Glorot initialization
    self.b_out = nn.Parameter(torch.zeros(self.alphabet_size * self.seq_len))
    nn.init.constant_(self.b_out, self.mu_bias_init)

    if self.include_temperature_scaler:
        self.temperature_scaler = nn.Parameter(torch.ones(1))
```
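One possible fix (a minimal sketch, not an official patch; the maintainers may prefer a different interface) is to make the signature match the body by accepting a single `params` dictionary, since every assignment already reads from it:

```python
# Sketch of one possible fix: accept the params dict the body already uses.
def __init__(self, params):
    super().__init__()
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.seq_len = params['seq_len']
    self.alphabet_size = params['alphabet_size']
    # ... the remaining body is unchanged, as it already indexes into params
```

The alternative, keeping the individual arguments and rewriting every `params['key']` access as the corresponding argument name, touches many more lines; the dict-based signature is the smaller diff.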
HaolingZHANG changed the title from "Incorrect Parameter Assignment in 'VAE_Standard_MLP_decoder' Initialization" to "Bug report: incorrect parameter assignment in 'VAE_Standard_MLP_decoder' initialization" on Mar 18, 2025.