% scaling_NATURE.m (forked from brando90/Generalization-Puzzles-in-Deep-Networks)
%clear;clc;
%load('./pytorch_experiments/test_runs_flatness4/loss_vs_gen_errors_norm_frobenius')
%load('./pytorch_experiments/test_runs_flatness5_ProperOriginalExpt/loss_vs_gen_errors_norm_frobenius_final')
%load('./pytorch_experiments/test_runs_flatness5_ProperOriginalExpt/loss_vs_gen_errors_norm_l1')
%load('./pytorch_experiments/test_runs_flatness5_ProperOriginalExpt/loss_vs_gen_errors_norm_l1_divided_by_10')
%load('./pytorch_experiments/test_runs_flatness5_ProperOriginalExpt/loss_vs_gen_errors_norm_l1_divided_by_100')
%load('./pytorch_experiments/test_runs_flatness5_ProperOriginalExpt/RL_corruption_1.0_loss_vs_gen_errors_norm_l2')
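% Sanity check (sketch): the workspace / .mat files above are assumed to
% provide corruption_all_probs, the *_normalized and *_unnormalized loss and
% error vectors, and their *_rand counterparts used below.
required_vars = {'corruption_all_probs','train_all_losses_normalized', ...
    'test_all_losses_normalized','train_all_errors_unnormalized', ...
    'gen_all_errors_unnormalized','train_all_losses_unnormalized', ...
    'test_all_losses_unnormalized'};
for k = 1:numel(required_vars)
    assert(exist(required_vars{k},'var')==1, 'missing variable: %s', required_vars{k});
end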
%%
% Point labels for lscatter: the label-corruption probability of each run.
markers = corruption_all_probs;
%markers = std_inits_all;
% Indices of the random-label (RL) runs inside the combined arrays.
%%%%RLs = 8:19;
RLs = 62:73;
% Overwrite the RL slots with the measurements from the corresponding *_rand arrays.
train_all_losses_normalized(RLs) = train_all_losses_normalized_rand(RLs);
test_all_losses_normalized(RLs) = test_all_losses_normalized_rand(RLs);
train_all_errors_unnormalized(RLs) = train_all_errors_unnormalized_rand(RLs);
gen_all_errors_unnormalized(RLs) = gen_all_errors_unnormalized_rand(RLs);
train_all_losses_unnormalized(RLs) = train_all_losses_unnormalized_rand(RLs);
test_all_losses_unnormalized(RLs) = test_all_losses_unnormalized_rand(RLs);
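% lscatter (labeled scatter: one text label per point) is assumed to be the
% MATLAB File Exchange function of that name; fail early with a clear message
% if it is missing. A rough stand-in (sketch) would be plain scatter plus text:
%   scatter(x,y); text(x(:), y(:), cellstr(num2str(lbl(:))));
assert(exist('lscatter','file') == 2, 'lscatter not found on the MATLAB path');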
%% test error vs train error (both unnormalized)
fig0 = figure;
lscatter(train_all_errors_unnormalized,gen_all_errors_unnormalized,markers)
%lsline
xlim([-0.05,1])
xlabel('Train Error (Network Unnormalized)')
ylabel('Test Error (Network Unnormalized)')
%% test error vs train loss (all normalized)
fig1 = figure;
lscatter(train_all_losses_normalized,gen_all_errors_normalized,markers)
%lscatter(all_train_errors,gen_all_errors_normalized,markers)
%lsline
%title('The weights of all models are normalized')
xlabel('Train Loss (Network Normalized)')
ylabel('Test Error (Network Normalized)')
%% IMPORTANT: test loss vs train loss (all normalized) - shows the linear relationship between train loss and test loss
fig2 = figure;
scatter(train_all_losses_normalized,test_all_losses_normalized)
hold on;
%
X = train_all_losses_normalized;
y = test_all_losses_normalized;
% X = [train_all_losses_normalized(1:7) train_all_losses_normalized(RLs)];
% y = [test_all_losses_normalized(1:7) test_all_losses_normalized(RLs)];
n = length(X);
% Least-squares intercept for a line with slope fixed at 1: b = mean(y - X).
Intercept = (1/n)*sum(y-X)
%
yCalc1 = X+Intercept;
% Coefficient of determination and root-mean-squared error of the slope-1 fit.
Rsq1 = 1 - sum((y - yCalc1).^2)/sum((y - mean(y)).^2)
RMSE = sqrt(mean((y - yCalc1).^2)) % Root Mean Squared Error
%
plot(X,yCalc1);
%
xlabel('Train Loss (Network Normalized)')
ylabel('Test Loss (Network Normalized)')
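% For comparison (sketch, not part of the original analysis): let least squares
% choose both slope and intercept instead of fixing the slope at 1 as above.
p = polyfit(X, y, 1);          % p(1) = slope, p(2) = intercept
yCalc2 = polyval(p, X);
Rsq2 = 1 - sum((y - yCalc2).^2)/sum((y - mean(y)).^2)
plot(X, yCalc2, '--');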
%% IMPORTANT: test error (unnormalized) vs train loss (normalized) - checks whether test error can be predicted from the normalized train loss
fig3 = figure;
lscatter(train_all_losses_normalized,gen_all_errors_unnormalized,markers)
%lsline
%title('Train Loss vs Test Error')
xlabel('Train Loss (Network Normalized)')
ylabel('Test Error (Network Unnormalized)')
%% test loss (unnormalized) vs train loss (normalized); not informative because the unnormalized loss diverges to infinity
fig4 = figure;
lscatter(train_all_losses_normalized,test_all_losses_unnormalized,markers)
%lsline
%title('Train Loss vs Test Loss')
xlabel('Train Loss (Network Normalized)')
ylabel('Test Loss (Network Unnormalized)')
%% CONTROL1: test loss (unnormalized) vs train loss (unnormalized)
fig5 = figure;
scatter(train_all_losses_unnormalized,test_all_losses_unnormalized)
% h = scatter(train_all_losses_unnormalized,test_all_losses_unnormalized)
% c = get(h,'Color')
% c{1}
% c{2}
%lsline
title('Control 1: The weights of all models are unnormalized')
xlabel('Train Loss (Network Unnormalized)')
ylabel('Test Loss (Network Unnormalized)')
xlim([-2.5e-4,9e-3])
%% CONTROL2: test error (unnormalized) vs train loss (unnormalized)
fig6 = figure;
lscatter(train_all_losses_unnormalized,gen_all_errors_unnormalized,markers)
%lsline
title('Control 2: The weights of all models are unnormalized')
xlabel('Train Loss (Network Unnormalized)')
ylabel('Test Error (Network Unnormalized)')
%% save
saveas(fig0,'test_error_vs_train_error_all_unnormalized');
saveas(fig0,'test_error_vs_train_error_all_unnormalized','pdf');
%
saveas(fig1,'test_error_vs_train_loss_all_normalized');
saveas(fig1,'test_error_vs_train_loss_all_normalized','pdf');
%
saveas(fig2,'important_test_loss_vs_train_loss_all_normalized');
saveas(fig2,'important_test_loss_vs_train_loss_all_normalized','pdf');
%
saveas(fig3,'important_test_error_vs_train_loss_unnormalized_vs_normalized');
saveas(fig3,'important_test_error_vs_train_loss_unnormalized_vs_normalized','pdf');
%
saveas(fig4,'test_loss_vs_train_loss_unnormalized_vs_normalized');
saveas(fig4,'test_loss_vs_train_loss_unnormalized_vs_normalized','pdf');
%
saveas(fig5,'control1_test_loss_vs_train_loss_all_unnormalized');
saveas(fig5,'control1_test_loss_vs_train_loss_all_unnormalized','pdf');
%
saveas(fig6,'control2_test_error_vs_train_loss_all_unnormalized');
saveas(fig6,'control2_test_error_vs_train_loss_all_unnormalized','pdf');
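%
% Note (assumption): saveas with no extension writes MATLAB .fig files, and the
% second call per figure exports a PDF with the default page-sized bounding box.
% On R2020a+ a tightly cropped PDF could instead be produced with, e.g.:
%   exportgraphics(fig2,'important_test_loss_vs_train_loss_all_normalized.pdf');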