layers.py
'''
Copied from katerakelly/pytorch-maml
'''
import numpy as np
from torch import nn
import torch
from torch.nn import functional as F
'''
Functional definitions of common layers.
Useful when the weights are exposed rather
than being contained in modules.
'''
def linear(input, weight, bias=None):
    # Linear layer applied with an externally supplied weight (and optional bias),
    # moved onto the GPU.
    if bias is None:
        return F.linear(input, weight.cuda())
    else:
        return F.linear(input, weight.cuda(), bias.cuda())

def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    # Guard against the default bias=None, which has no .cuda() method.
    bias = bias.cuda() if bias is not None else None
    return F.conv2d(input, weight.cuda(), bias, stride, padding, dilation, groups)

def relu(input):
    # In-place ReLU implemented via thresholding at zero.
    return F.threshold(input, 0, 0, inplace=True)

def maxpool(input, kernel_size, stride=None):
    return F.max_pool2d(input, kernel_size, stride)
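
# --- Example (sketch, not in the original file) -----------------------------
# A minimal illustration of how these functional layers might be used when the
# parameters live in a plain dict of tensors rather than inside nn.Modules
# (e.g. MAML-style fast weights). The dict keys and layer shapes below are
# illustrative assumptions: 'w1' is (n_hidden, n_in), 'b1' is (n_hidden,),
# 'w2' is (n_out, n_hidden), 'b2' is (n_out,). The input x is assumed to live
# on the GPU, since linear() moves the exposed weights there.
def _example_functional_forward(x, fast_weights):
    h = relu(linear(x, fast_weights['w1'], fast_weights['b1']))
    return linear(h, fast_weights['w2'], fast_weights['b2'])
# -----------------------------------------------------------------------------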
def batchnorm(input, weight=None, bias=None, running_mean=None, running_var=None, training=True, eps=1e-5, momentum=0.1):
    ''' momentum = 1 restricts stats to the current mini-batch '''
    # This hack only works when momentum is 1 and avoids needing to track
    # running stats by substituting dummy per-channel buffers on every call.
    running_mean = torch.zeros(input.size(1)).cuda()
    running_var = torch.ones(input.size(1)).cuda()
    return F.batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
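
# --- Example (sketch, not in the original file) -----------------------------
# With momentum=1 the running estimates are completely overwritten by the
# current mini-batch statistics, so the dummy zero-mean / unit-variance buffers
# created above never influence the output. The names and shapes below are
# illustrative assumptions: feature_map is a (N, C, H, W) CUDA tensor and
# gamma, beta are per-channel affine parameters of shape (C,) on the GPU,
# kept outside any nn.Module.
def _example_batchnorm_call(feature_map, gamma, beta):
    return batchnorm(feature_map, weight=gamma, bias=beta, momentum=1)
# -----------------------------------------------------------------------------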
def bilinear_upsample(in_, factor):
    # F.upsample is deprecated in newer PyTorch; F.interpolate(in_,
    # scale_factor=factor, mode='bilinear') is the drop-in replacement.
    return F.upsample(in_, None, factor, 'bilinear')

def log_softmax(input):
    # dim=1 (the class dimension) was the implicit default for 2D inputs;
    # newer PyTorch requires it to be stated explicitly.
    return F.log_softmax(input, dim=1)
class exponential(nn.Module):
    def __init__(self):
        super(exponential, self).__init__()

    def forward(self, x):
        return torch.exp(x)
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)      # hidden layer
        self.hidden_two = torch.nn.Linear(n_hidden, n_hidden)   # hidden layer
        self.hidden_3 = torch.nn.Linear(n_hidden, n_hidden)     # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)      # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))        # activation function for hidden layer
        x = F.relu(self.hidden_two(x))    # activation function for hidden layer
        x = F.relu(self.hidden_3(x))      # activation function for hidden layer
        x = self.predict(x)               # linear output
        return x
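
# --- Example (sketch, not in the original file) -----------------------------
# Quick shape check for Net; the sizes here are arbitrary illustrative choices,
# not values from the upstream code.
if __name__ == '__main__':
    net = Net(n_feature=1, n_hidden=40, n_output=1)
    dummy = torch.randn(16, 1)    # batch of 16 one-dimensional inputs
    out = net(dummy)              # forward pass through the three hidden layers
    print(out.shape)              # expected: torch.Size([16, 1])
# -----------------------------------------------------------------------------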