-
Notifications
You must be signed in to change notification settings - Fork 2
/
ml_gist.py
64 lines (49 loc) · 1.99 KB
/
ml_gist.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import numpy as np
from sklearn.model_selection import train_test_split

# Synthetic regression-style data: 1000 samples, 5 features, 1 target column.
X = np.random.random((1000, 5))
Y = np.random.random((1000, 1))

# Random hold-out split: 80% of rows for training, 20% for evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
#################################################
#################################################
import numpy as np
from sklearn.model_selection import StratifiedKFold

# Imbalanced binary labels: 800 positives followed by 200 negatives.
X = np.random.random((1000, 5))
Y = np.concatenate([np.ones((800, 1)), np.zeros((200, 1))], axis=0)

# 10-fold stratified split: every fold preserves the 80/20 class ratio.
skf = StratifiedKFold(n_splits=10)
# Take only the first fold.  next() pulls one split lazily instead of
# materializing all 10 the way list(skf.split(...))[0] did.  ravel()
# passes y as the 1-D vector split() expects — a (1000, 1) column
# vector raises a DataConversionWarning.
train_inds, test_inds = next(skf.split(X, Y.ravel()))
X_train, X_test = X[train_inds], X[test_inds]
Y_train, Y_test = Y[train_inds], Y[test_inds]
#################################################
#################################################
from keras.preprocessing.text import text_to_word_sequence

# Basic Keras tokenization: lower-cases, strips punctuation,
# and splits the text on whitespace.
text = 'Hello world!'
tokens = text_to_word_sequence(text)
#################################################
#################################################
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split

# Small binary classifier: 5 inputs -> 8 ReLU units -> dropout -> sigmoid.
model = Sequential()
model.add(Dense(8, input_dim=5, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Features are centered around zero; the label is 1 exactly when both
# feature 1 and feature 3 are positive (a learnable AND of two signs).
X = np.random.random((1000, 5)) - 0.5
Y = ((X[:, 1] > 0) & (X[:, 3] > 0)).astype(float)

# 90/10 train/test split, then fit while monitoring the held-out set.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1)
model.fit(X_train, Y_train, batch_size=32, epochs=500, validation_data=(X_test, Y_test))
#################################################
#################################################
# 1D Convolution in PyTorch
#################################################
import torch
from torch.nn import Conv1d  # fixed typo: was "Cov1d", an ImportError

batch_size = 16
word_emb_size = 4
seq_len = 7

# A batch of 7-token sequences, each token a 4-dim embedding.
# Conv1d expects (batch, channels, length), so embeddings are channels.
# Renamed from `input` to avoid shadowing the builtin.
inputs = torch.randn(batch_size, word_emb_size, seq_len)

# 4 input channels -> 3 output channels, window of 3 tokens.
# With no padding the output length is seq_len - kernel_size + 1 = 5.
conv1 = Conv1d(in_channels=word_emb_size, out_channels=3, kernel_size=3)
hidden1 = conv1(inputs)

# Max-pool over the time dimension.  torch.max(..., dim=...) returns a
# (values, indices) named tuple, so unpack to keep only the pooled
# values — the original bound the whole tuple to hidden2.
hidden2, _ = torch.max(hidden1, dim=2)