Sourcery Starbot ⭐ refactored curenosm/MeIA #3

Open · wants to merge 1 commit into base: develop
14 changes: 6 additions & 8 deletions modulo_2/sesion_1/src/exercise-S1-3-01.py
@@ -34,7 +34,7 @@
 def test(model, test_loader):
     start_test = True
     with torch.no_grad():
-        for batch_idx, data in enumerate(test_loader):
+        for data in test_loader:
Author comment: Function test refactored with the following changes: the unused batch index batch_idx is dropped and the loop iterates directly over test_loader.

             # get batch data
             samples = data[0].float().cuda()
             labels = data[1].long().cuda()
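For illustration, a minimal, self-contained sketch of the pattern applied in this hunk: when the batch index is never used, the DataLoader can be iterated directly. The toy tensors and CPU-only calls below are stand-ins for the real EEG batches and the .cuda() transfers.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-in for test_loader: 8 samples with 3 features each, binary labels.
loader = DataLoader(TensorDataset(torch.randn(8, 3), torch.randint(0, 2, (8,))),
                    batch_size=4)

with torch.no_grad():
    for data in loader:              # no enumerate(): the batch index was unused
        samples = data[0].float()    # the original code also moves these to .cuda()
        labels = data[1].long()
        print(samples.shape, labels.shape)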
@@ -93,8 +93,7 @@ def kfcv(X, Y, subjects, args):

     list_metrics_clsf = []

-    for i, (train_index, test_index) in enumerate(skf.split(x, y)):
-
+    for train_index, test_index in skf.split(x, y):
Author comment: Function kfcv refactored with the following changes: enumerate() and the unused fold index i are removed, the unused loop counter c is renamed to _, and the metrics CSV is written via an f-string path inside a with block (see the hunks below).

         x_train, y_train = x[train_index], y[train_index]
         x_test, y_test = x[test_index], y[test_index]

@@ -139,7 +138,7 @@ def kfcv(X, Y, subjects, args):
         iter_train = iter(source_loader)
         list_loss = []

-        for c in range(len(source_loader)):
+        for _ in range(len(source_loader)):
             # get batch
             samples, labels = iter_train.next()
             samples = samples.float().cuda()
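A small sketch of the unused-counter rename shown above, under one stated assumption: the batch is drawn with next(iter_train), the Python 3 idiom, whereas the repository code calls iter_train.next(), which relies on an older PyTorch iterator API. Toy data stands in for source_loader.

import torch
from torch.utils.data import DataLoader, TensorDataset

source_loader = DataLoader(TensorDataset(torch.randn(8, 3),
                                         torch.randint(0, 2, (8,))),
                           batch_size=4)

iter_train = iter(source_loader)
list_loss = []
for _ in range(len(source_loader)):       # "_" signals the counter is never read
    samples, labels = next(iter_train)    # iter_train.next() in the original code
    samples = samples.float()
    labels = labels.long()
    list_loss.append(float(labels.float().mean()))  # placeholder for the real loss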
@@ -179,10 +178,9 @@ def kfcv(X, Y, subjects, args):
     list_metrics_clsf = np.array(list_metrics_clsf)

     # Save Classification Metrics
-    save_file = args.dir_resume + "/kfcv-results.csv"
-    f = open(save_file, 'ab')
-    np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
-    f.close()
+    save_file = f"{args.dir_resume}/kfcv-results.csv"
+    with open(save_file, 'ab') as f:
+        np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
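For context, a minimal sketch of the file-handling change: 'ab' keeps appending rows across runs, and the with block guarantees the CSV is closed even if np.savetxt raises. The directory and metric values below are made up; the real code uses args.dir_resume and the per-fold metrics.

import numpy as np

dir_resume = "."                              # stand-in for args.dir_resume
list_metrics_clsf = np.array([[0.91, 0.90, 0.95, 1],
                              [0.88, 0.87, 0.93, 2]])

save_file = f"{dir_resume}/kfcv-results.csv"
with open(save_file, 'ab') as f:              # file is closed automatically
    np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')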


def main(args):
19 changes: 9 additions & 10 deletions modulo_2/sesion_1/src/exercise-S1-3-02.py
@@ -35,7 +35,7 @@
 def test(model, test_loader):
     start_test = True
     with torch.no_grad():
-        for batch_idx, data in enumerate(test_loader):
+        for data in test_loader:
Author comment: Function test refactored with the following changes: the unused batch index batch_idx is dropped and the loop iterates directly over test_loader.

             # get batch data
             samples = data[0].float().cuda()
             labels = data[1].long().cuda()
@@ -79,7 +79,7 @@ def losocv(X, Y, subjects, args):

     # variable used to save accuracy results
     list_metrics_clsf = []

Author comment: Function losocv refactored with the following changes: the fold-skipping condition is simplified to foldNum >= 2, the unused loop counter c is renamed to _, the checkpoint path is built with an f-string, and the metrics CSV is written via an f-string path inside a with block (see the hunks below).

     # Extract pairs between indexes and subjects
     fold_pairs = get_subject_indices(subjects)

@@ -88,7 +88,7 @@ def losocv(X, Y, subjects, args):
         print('Beginning fold {0} out of {1}'.format(foldNum+1, len(fold_pairs)))

         # Only Subjects 1, 2 are executed
-        if foldNum + 1 >= 3:
+        if foldNum >= 2:
             continue

         # Divide dataset into training, validation and testing sets
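The condition rewrite is purely arithmetic. A toy sketch with made-up fold names shows that foldNum >= 2 skips exactly the folds that foldNum + 1 >= 3 did, so only the first two subjects run.

fold_pairs = ['subject-1', 'subject-2', 'subject-3', 'subject-4']   # stand-in for get_subject_indices(...)

for foldNum, fold in enumerate(fold_pairs):
    # foldNum + 1 >= 3  is the same test as  foldNum >= 2: both keep only foldNum 0 and 1.
    if foldNum >= 2:
        continue
    print('Beginning fold {0} out of {1}'.format(foldNum + 1, len(fold_pairs)))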
@@ -130,7 +130,7 @@ def losocv(X, Y, subjects, args):
         iter_train = iter(source_loader)
         list_loss = []

-        for c in range(len(source_loader)):
+        for _ in range(len(source_loader)):
             # get batch
             samples, labels = iter_train.next()
             samples = samples.float().cuda()
@@ -164,20 +164,19 @@ def losocv(X, Y, subjects, args):
             print('Epoch: %d loss: %4f Acc: %.4f F1-score: %.4f AUC: %.4f' % (epoch+1, avg_loss, acc_test, f1_test, auc_test))

         print('Saving model...')
-        torch.save(model.state_dict(), 'trained_model/source' + str(foldNum+1) + '.pt')
+        torch.save(model.state_dict(), f'trained_model/source{str(foldNum + 1)}.pt')

         print("\n")
         # add to list
         list_metrics_clsf.append([acc_test, f1_test, auc_test, foldNum+1])

     # To np array
     list_metrics_clsf = np.array(list_metrics_clsf)

     # Save Classification Metrics
-    save_file = args.dir_resume+"/losocv-results.csv"
-    f=open(save_file, 'ab')
-    np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
-    f.close()
+    save_file = f"{args.dir_resume}/losocv-results.csv"
+    with open(save_file, 'ab') as f:
+        np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
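One small aside on the f-string version of the checkpoint path: the str() inside the braces is redundant, since f-strings convert the expression themselves. A toy sketch with a placeholder foldNum:

foldNum = 0
print(f'trained_model/source{str(foldNum + 1)}.pt')   # what the PR writes
print(f'trained_model/source{foldNum + 1}.pt')        # equivalent, without str()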


def main(args):
54 changes: 27 additions & 27 deletions modulo_2/sesion_1/src/utils/modules_pbashivan.py
@@ -31,16 +31,16 @@ def load_data(data_file):
     -------
     data: array_like
     """
-    print("Loading data from %s" % (data_file))
+    print(f"Loading data from {data_file}")

     dataMat = scipy.io.loadmat(data_file, mat_dtype=True)
     all_data = np.array(dataMat['features'])

     data = np.array(all_data[:,:len(all_data[0])-1])
     labels = np.array(all_data[:,len(all_data[0])-1])

     print("Data loading complete. Shape is %r" % (dataMat['features'].shape,))

Author comment: Function load_data refactored with the following changes: the %-style interpolation in the first print call is replaced with an f-string.

     return data, labels.T - 1  # Sequential indices
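For readers unfamiliar with the feature-matrix layout that load_data assumes, a self-contained sketch with a toy array in place of the scipy.io.loadmat result: every column but the last is a feature, the last column holds a 1-based label, and the return value shifts it to 0-based.

import numpy as np

# Toy stand-in for dataMat['features']: two trials, two features, label in the last column.
all_data = np.array([[0.1, 0.2, 1.0],
                     [0.3, 0.4, 2.0]])

data = np.array(all_data[:, :len(all_data[0]) - 1])     # feature columns
labels = np.array(all_data[:, len(all_data[0]) - 1])    # label column
print(f"Data loading complete. Shape is {all_data.shape!r}")
print(data, labels.T - 1)                               # labels become 0-based, as in load_data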


@@ -278,36 +278,34 @@ def get_subject_indices_kfcv(id_subjects):


 def load_bashivan_data(data_dir, n_channels = 64, n_windows=7, n_bands=3, generate_images=False, size_image=32, visualize=False):
-
     """
     Module used to load dataset of bashivan et al. 2014.
     """

     # load data pbashivan
-    data, labels = load_data(data_dir + "FeatureMat_timeWin.mat")
+    data, labels = load_data(f"{data_dir}FeatureMat_timeWin.mat")
     print("Original data:",data.shape, labels.shape)

     if generate_images:

         # NOTE: Only a 3D projection is proporcionated, then it is not avaliable other
         # records with positions.

         #Load locations in 3D
-        locs_orig = scipy.io.loadmat(data_dir+'Neuroscan_locs_orig.mat', mat_dtype=True)
+        locs_orig = scipy.io.loadmat(
+            f'{data_dir}Neuroscan_locs_orig.mat', mat_dtype=True
+        )
         locs3D = locs_orig['A']

-        #Convert to 2D
-        locs2D =[]
-        for e in locs3D:
-            locs2D.append(azim_proj(e))
-
-
+        locs2D = [azim_proj(e) for e in locs3D]
         #save in numpy array
         locs2D = np.array(locs2D)

         # visualize projection
         if visualize:
             print("No. channels:",locs3D.shape)

Author comment: Function load_bashivan_data refactored with the following changes: file paths are built with f-strings, the long scipy.io.loadmat calls are wrapped across lines, and the explicit append loop over locs3D is collapsed into a list comprehension.

This removes the following comments (why?):

#Convert to 2D

             # Plot in 3D
             fig = plt.figure()
             ax = fig.add_subplot(111, projection='3d')
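A minimal sketch of the append-loop-to-comprehension rewrite; azim_proj_stub is a hypothetical stand-in for the repository's azim_proj, which projects a 3D electrode location onto 2D.

import numpy as np

def azim_proj_stub(pos):
    # Hypothetical projection: keep only x and y of a 3D point.
    x, y, _ = pos
    return [x, y]

locs3D = np.array([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])

# The explicit "locs2D = []; for e in locs3D: locs2D.append(...)" loop becomes:
locs2D = np.array([azim_proj_stub(e) for e in locs3D])
print(locs2D.shape)   # (2, 2)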
@@ -316,7 +314,7 @@ def load_bashivan_data(data_dir, n_channels = 64, n_windows=7, n_bands=3, generate_images=False, size_image=32, visualize=False):
             ax.set_xlabel('X Label')
             ax.set_ylabel('Y Label')
             ax.set_zlabel('Z Label')

             # Plot in 2D
             fig = plt.figure()
             ax = fig.add_subplot(111)
@@ -325,7 +323,7 @@ def load_bashivan_data(data_dir, n_channels = 64, n_windows=7, n_bands=3, generate_images=False, size_image=32, visualize=False):
             ax.set_xlabel('X Label')
             ax.set_ylabel('Y Label')
             plt.show()

         # NOTE: database is defined with 7 time windows
         # FFT power values extracted for three frequency bands (theta, alpha, beta).
         # Features are arranged in band and electrodes order (theta_1, theta_2...,
@@ -347,18 +345,20 @@ def load_bashivan_data(data_dir, n_channels = 64, n_windows=7, n_bands=3, generate_images=False, size_image=32, visualize=False):
         images = np.array(images)
         # transpose
         images = images.transpose((1, 2, 0, 3, 4))
-        scipy.io.savemat(data_dir+'images.mat', mdict={'images': images})
+        scipy.io.savemat(f'{data_dir}images.mat', mdict={'images': images})

     else:
         #Load locations in 3D
-        files_mat = scipy.io.loadmat(data_dir+'images.mat', mat_dtype=True)
+        files_mat = scipy.io.loadmat(f'{data_dir}images.mat', mat_dtype=True)
         images = files_mat['images']

     #Load info subjects associated with trials. List of patiens for 2670 trials
-    subjects_trials= scipy.io.loadmat(data_dir+'trials_subNums.mat', mat_dtype=True)
+    subjects_trials = scipy.io.loadmat(
+        f'{data_dir}trials_subNums.mat', mat_dtype=True
+    )
     subjNumbers = np.squeeze(subjects_trials['subjectNum'])


     print("Shape images", images.shape)

     return images, labels, subjNumbers
14 changes: 6 additions & 8 deletions modulo_2/sesion_2/src/exercise-S1-3-01.py
@@ -34,7 +34,7 @@
 def test(model, test_loader):
     start_test = True
     with torch.no_grad():
-        for batch_idx, data in enumerate(test_loader):
+        for data in test_loader:
Author comment: Function test refactored with the following changes: the unused batch index batch_idx is dropped and the loop iterates directly over test_loader.

             # get batch data
             samples = data[0].float().cuda()
             labels = data[1].long().cuda()
@@ -93,8 +93,7 @@ def kfcv(X, Y, subjects, args):

     list_metrics_clsf = []

-    for i, (train_index, test_index) in enumerate(skf.split(x, y)):
-
+    for train_index, test_index in skf.split(x, y):
Author comment: Function kfcv refactored with the following changes: enumerate() and the unused fold index i are removed, the unused loop counter c is renamed to _, and the metrics CSV is written via an f-string path inside a with block (see the hunks below).

         x_train, y_train = x[train_index], y[train_index]
         x_test, y_test = x[test_index], y[test_index]

@@ -139,7 +138,7 @@ def kfcv(X, Y, subjects, args):
         iter_train = iter(source_loader)
         list_loss = []

-        for c in range(len(source_loader)):
+        for _ in range(len(source_loader)):
             # get batch
             samples, labels = iter_train.next()
             samples = samples.float().cuda()
@@ -179,10 +178,9 @@ def kfcv(X, Y, subjects, args):
     list_metrics_clsf = np.array(list_metrics_clsf)

     # Save Classification Metrics
-    save_file = args.dir_resume + "/kfcv-results.csv"
-    f = open(save_file, 'ab')
-    np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
-    f.close()
+    save_file = f"{args.dir_resume}/kfcv-results.csv"
+    with open(save_file, 'ab') as f:
+        np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')


def main(args):
19 changes: 9 additions & 10 deletions modulo_2/sesion_2/src/exercise-S1-3-02.py
@@ -35,7 +35,7 @@
 def test(model, test_loader):
     start_test = True
     with torch.no_grad():
-        for batch_idx, data in enumerate(test_loader):
+        for data in test_loader:
Author comment: Function test refactored with the following changes: the unused batch index batch_idx is dropped and the loop iterates directly over test_loader.

             # get batch data
             samples = data[0].float().cuda()
             labels = data[1].long().cuda()
@@ -79,7 +79,7 @@ def losocv(X, Y, subjects, args):

     # variable used to save accuracy results
     list_metrics_clsf = []

Author comment: Function losocv refactored with the following changes: the fold-skipping condition is simplified to foldNum >= 2, the unused loop counter c is renamed to _, the checkpoint path is built with an f-string, and the metrics CSV is written via an f-string path inside a with block (see the hunks below).

     # Extract pairs between indexes and subjects
     fold_pairs = get_subject_indices(subjects)

@@ -88,7 +88,7 @@ def losocv(X, Y, subjects, args):
         print('Beginning fold {0} out of {1}'.format(foldNum+1, len(fold_pairs)))

         # Only Subjects 1, 2 are executed
-        if foldNum + 1 >= 3:
+        if foldNum >= 2:
             continue

         # Divide dataset into training, validation and testing sets
@@ -130,7 +130,7 @@ def losocv(X, Y, subjects, args):
         iter_train = iter(source_loader)
         list_loss = []

-        for c in range(len(source_loader)):
+        for _ in range(len(source_loader)):
             # get batch
             samples, labels = iter_train.next()
             samples = samples.float().cuda()
@@ -164,20 +164,19 @@ def losocv(X, Y, subjects, args):
             print('Epoch: %d loss: %4f Acc: %.4f F1-score: %.4f AUC: %.4f' % (epoch+1, avg_loss, acc_test, f1_test, auc_test))

         print('Saving model...')
-        torch.save(model.state_dict(), 'trained_model/source' + str(foldNum+1) + '.pt')
+        torch.save(model.state_dict(), f'trained_model/source{str(foldNum + 1)}.pt')

         print("\n")
         # add to list
         list_metrics_clsf.append([acc_test, f1_test, auc_test, foldNum+1])

     # To np array
     list_metrics_clsf = np.array(list_metrics_clsf)

     # Save Classification Metrics
-    save_file = args.dir_resume+"/losocv-results.csv"
-    f=open(save_file, 'ab')
-    np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
-    f.close()
+    save_file = f"{args.dir_resume}/losocv-results.csv"
+    with open(save_file, 'ab') as f:
+        np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')


def main(args):
21 changes: 10 additions & 11 deletions modulo_2/sesion_2/src/exercise-S2-2-01.py
@@ -35,7 +35,7 @@
 def test(network_f, network_h, test_loader):
     start_test = True
     with torch.no_grad():
-        for batch_idx, data in enumerate(test_loader):
+        for data in test_loader:
Author comment: Function test refactored with the following changes: the unused batch index batch_idx is dropped and the loop iterates directly over test_loader.

             # get batch data
             samples = data[0].float().cuda()
             labels = data[1].long().cuda()
@@ -80,16 +80,16 @@ def losocv(X, Y, subjects, args):

     # variable used to save accuracy results
     list_metrics_clsf = []

     # Extract pairs between indexes and subjects
     fold_pairs = get_subject_indices(subjects)

     # Iterate over fold_pairs
     for foldNum, fold in enumerate(fold_pairs):
         print('Beginning fold {0} out of {1}'.format(foldNum+1, len(fold_pairs)))

         # Only Subjects 1, 2 are executed
-        if foldNum + 1 >= 3:# and foldNum + 1 <= 11:
+        if foldNum >= 2:# and foldNum + 1 <= 11:
Author comment: Function losocv refactored with the following changes: the fold-skipping condition is simplified to foldNum >= 2, the loop-invariant lambda_dis is hoisted out of the training loop, and the metrics CSV is written via an f-string path inside a with block (see the hunks below).

             continue

         # Get source and target datasets
@@ -144,6 +144,8 @@ def losocv(X, Y, subjects, args):
         # Almacenar pérdida
         list_loss = []

+        # [Total loss]
+        lambda_dis = 1.0
         for iter_num in range(0, args.max_iterations + 1):
             network_f.train()
             network_h.train()
@@ -176,8 +178,6 @@ def losocv(X, Y, subjects, args):
             # [Classification Loss]
             classifier_loss = criterion(outputs_source, labels_source)

-            # [Total loss]
-            lambda_dis = 1.0
             total_loss = classifier_loss + lambda_dis * transfer_loss

             # Reset gradients
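A compact sketch of the hoisting change: lambda_dis never varies between iterations, so assigning it once before the loop is equivalent and avoids re-assigning it on every step. The loss values below are placeholders for the real classifier and transfer losses.

lambda_dis = 1.0                      # [Total loss] weight, hoisted out of the loop
list_loss = []
for iter_num in range(3):
    classifier_loss = 0.5             # placeholder values
    transfer_loss = 0.2
    total_loss = classifier_loss + lambda_dis * transfer_loss
    list_loss.append(total_loss)
print(list_loss)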
@@ -209,15 +209,14 @@ def losocv(X, Y, subjects, args):
         print("\n")
         # add to list
         list_metrics_clsf.append([acc, f1, auc, foldNum+1])

     # To np array
     list_metrics_clsf = np.array(list_metrics_clsf)

     # Save Classification Metrics
-    save_file = args.dir_resume+"/losocv-results.csv"
-    f=open(save_file, 'ab')
-    np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')
-    f.close()
+    save_file = f"{args.dir_resume}/losocv-results.csv"
+    with open(save_file, 'ab') as f:
+        np.savetxt(f, list_metrics_clsf, delimiter=",", fmt='%0.4f')


def main(args):