STYLE: Prefer importing numpy as np
Prefer importing `numpy` as `np` to be consistent with current common
practice and for the sake of compactness.
jhlegarreta committed Oct 22, 2023
1 parent ee43eac commit 417fa48
Showing 45 changed files with 710 additions and 710 deletions.
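
For reference, the pattern applied across all 45 files is the conventional alias; a minimal before/after sketch (not taken from any file in this commit):

```python
# Before: every call site spells out the full module name.
import numpy

centroid = numpy.mean(numpy.zeros((10, 3)), axis=0)

# After: the community-standard alias keeps call sites compact.
import numpy as np

centroid = np.mean(np.zeros((10, 3)), axis=0)
```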
22 changes: 11 additions & 11 deletions bin/wm_append_diffusion_measures_across_subjects.py
@@ -5,7 +5,7 @@
 import glob
 import os
 
-import numpy
+import numpy as np
 import pandas

@@ -82,8 +82,8 @@ def main():
 
         data.append(stats_data_vec)
 
-    data = numpy.array(data)
-    data = numpy.concatenate([numpy.array(subject_IDs)[:, numpy.newaxis], data], axis = 1)
+    data = np.array(data)
+    data = np.concatenate([np.array(subject_IDs)[:, np.newaxis], data], axis = 1)
 
     df = pandas.DataFrame(data, columns=appended_fields)

@@ -111,19 +111,19 @@ def main():
 
     if len(clusters) == 800 and len(tracts) == 73:
         comm_clusters = [3, 8, 33, 40, 46, 52, 56, 57, 62, 68, 69, 73, 86, 91, 109, 110, 114, 142, 144, 145, 146, 159, 163, 250, 251, 252, 257, 262, 263, 268, 271, 305, 311, 314, 322, 330, 334, 338, 342, 350, 363, 371, 375, 403, 410, 437, 440, 448, 456, 465, 468, 475, 484, 485, 488, 519, 522, 525, 543, 545, 549, 557, 576, 577, 582, 587, 591, 597, 601, 614, 620, 623, 627, 633, 645, 653, 658, 663, 670, 677, 683, 702, 770, 781]
-        comm_clusters = numpy.array(comm_clusters)
-        hemi_clusters = numpy.setdiff1d(numpy.arange(800), comm_clusters)
+        comm_clusters = np.array(comm_clusters)
+        hemi_clusters = np.setdiff1d(np.arange(800), comm_clusters)
     elif len(clusters) == 800 and len(tracts) == 74:  # CPC separated
         comm_clusters = [3, 8, 33, 40, 46, 52, 56, 57, 62, 68, 69, 73, 86, 91, 109, 110, 114, 142, 144, 146, 163, 250, 251, 252, 257, 262, 263, 268, 271, 305, 311, 314, 322, 330, 334, 338, 342, 350, 363, 371, 375, 403, 410, 437, 440, 448, 456, 465, 468, 475, 484, 485, 488, 519, 522, 525, 543, 545, 549, 576, 577, 582, 587, 591, 597, 601, 614, 620, 623, 627, 633, 645, 653, 658, 663, 670, 683, 702, 781]
-        comm_clusters = numpy.array(comm_clusters)
-        hemi_clusters = numpy.setdiff1d(numpy.arange(800), comm_clusters)
+        comm_clusters = np.array(comm_clusters)
+        hemi_clusters = np.setdiff1d(np.arange(800), comm_clusters)
     else:
         comm_clusters = None
         hemi_clusters = None
 
     locations = ['left_hemisphere', 'right_hemisphere', 'commissural']
     appended_fields = ['subjectkey']
-    clusters = numpy.array(clusters)
+    clusters = np.array(clusters)
     for loc in locations:
         if loc == 'left_hemisphere' or loc == 'right_hemisphere':
             clusters_loc = clusters[hemi_clusters]
@@ -161,16 +161,16 @@ def main():
             stats_data = stats_data[comm_clusters, :]
             c_stats_data_vec = stats_data.flatten()
 
-        stats_data_vec = numpy.concatenate([l_stats_data_vec, r_stats_data_vec, c_stats_data_vec])
+        stats_data_vec = np.concatenate([l_stats_data_vec, r_stats_data_vec, c_stats_data_vec])
 
         if stats_data_vec.shape[0] != len(appended_fields) - 1:
             print("Error: Check whether the diffusion measure file has the same rows and columns as the other subjects!")
             exit()
 
         data.append(stats_data_vec)
 
-    data = numpy.array(data)
-    data = numpy.concatenate([numpy.array(subject_IDs)[:, numpy.newaxis], data], axis = 1)
+    data = np.array(data)
+    data = np.concatenate([np.array(subject_IDs)[:, np.newaxis], data], axis = 1)
 
     df = pandas.DataFrame(data, columns=appended_fields)

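The hunks above in wm_append_diffusion_measures_across_subjects.py split the 800 atlas cluster indices into commissural and hemispheric groups; here is a self-contained sketch of that `np.setdiff1d` complement pattern (toy indices, not the real commissural cluster IDs):

```python
import numpy as np

# Toy commissural cluster indices (the script hard-codes the real atlas IDs).
comm_clusters = np.array([3, 8, 33, 40])

# Every cluster index in [0, 800) that is not commissural is hemispheric.
hemi_clusters = np.setdiff1d(np.arange(800), comm_clusters)

assert comm_clusters.size + hemi_clusters.size == 800
```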
20 changes: 10 additions & 10 deletions bin/wm_assess_cluster_location_by_hemisphere.py
@@ -17,7 +17,7 @@
 # and this can be learned. At that point all data are midsagittally aligned, which this requires.
 # For running this per-subject, the alignment should be performed to handle the tracts near the midline better.
 # That should be added as an option.
-import numpy
+import numpy as np
 import vtk
 
 import whitematteranalysis as wma
@@ -116,7 +116,7 @@ def _read_location(_inpd):
 
         return _flag_location
 
-    mask_location = numpy.zeros(pd.GetNumberOfLines())
+    mask_location = np.zeros(pd.GetNumberOfLines())
 
     inpointdata = inpd.GetPointData()
     flag_location = False
@@ -151,7 +151,7 @@ def _read_location(_inpd):
         print(f"<{os.path.basename(__file__)}> Cluster location file is assigned but the file", clusterLocationFile, "does not exist.")
         exit()
     else:
-        location_data = numpy.loadtxt(open(clusterLocationFile, "rb"),
+        location_data = np.loadtxt(open(clusterLocationFile, "rb"),
                                       dtype={'names': ('Cluster Index', 'Location Label'), 'formats': ('S17', 'S1')},
                                       delimiter="\t", skiprows=1)

@@ -289,20 +289,20 @@ def _read_location(_inpd):
     wma.io.write_polydata(pd, fname)
 
     # for sanity check
-    if len(numpy.where(mask_location == 0)[0]) > 1:
+    if len(np.where(mask_location == 0)[0]) > 1:
         print("Error: Not all fibers in", fname, "are labeled with hemisphere location information.")
         exit()
 
     if outdir is not None:
 
         # output separated clusters
-        mask_right = numpy.zeros(pd.GetNumberOfLines())
-        mask_left = numpy.zeros(pd.GetNumberOfLines())
-        mask_commissure = numpy.zeros(pd.GetNumberOfLines())
+        mask_right = np.zeros(pd.GetNumberOfLines())
+        mask_left = np.zeros(pd.GetNumberOfLines())
+        mask_commissure = np.zeros(pd.GetNumberOfLines())
 
-        mask_left[numpy.where(mask_location == 1)[0]] = 1
-        mask_right[numpy.where(mask_location == 2)[0]] = 1
-        mask_commissure[numpy.where(mask_location == 3)[0]] = 1
+        mask_left[np.where(mask_location == 1)[0]] = 1
+        mask_right[np.where(mask_location == 2)[0]] = 1
+        mask_commissure[np.where(mask_location == 3)[0]] = 1
 
         pd_right = wma.filter.mask(pd, mask_right, preserve_point_data=True, preserve_cell_data=True, verbose=False)
         pd_left = wma.filter.mask(pd, mask_left, preserve_point_data=True, preserve_cell_data=True, verbose=False)
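The final hunk above in wm_assess_cluster_location_by_hemisphere.py expands each per-fiber location label into a binary mask before calling `wma.filter.mask`; a minimal sketch of that labeling pattern (toy label array; the `wma` calls are omitted):

```python
import numpy as np

# Per-fiber location labels, as in the script: 1 = left, 2 = right, 3 = commissural.
mask_location = np.array([1, 2, 3, 1, 2, 1])

mask_left = np.zeros(mask_location.shape[0])
mask_left[np.where(mask_location == 1)[0]] = 1  # the exact pattern used in the diff

# Boolean indexing is an equivalent, slightly terser spelling of the same idea.
mask_right = np.zeros(mask_location.shape[0])
mask_right[mask_location == 2] = 1
```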
24 changes: 12 additions & 12 deletions bin/wm_average_tract_measures.py
@@ -5,7 +5,7 @@
 import os
 import re
 
-import numpy
+import numpy as np
 import pandas


@@ -45,7 +45,7 @@ def main():
     fields = []
     for col in stats.columns:
         fields.append(col)
-    fields = numpy.array(fields)
+    fields = np.array(fields)
 
     print(fields)

@@ -65,7 +65,7 @@ def main():
         print(fields[indices])
         append_list.append(indices)
 
-    append_list = numpy.array(append_list)
+    append_list = np.array(append_list)
     append_measures = [field.replace(args.tractList[-1], '') for field in fields[indices]]
 
     print("Output measures:", append_measures)
@@ -75,21 +75,21 @@
 
         if m_name == '.Num_Points':
 
-            avg_stat = numpy.sum(stats.to_numpy()[:, append_list[:, m_idx]], axis=1)
+            avg_stat = np.sum(stats.to_numpy()[:, append_list[:, m_idx]], axis=1)
 
         elif m_name == '.Num_Fibers':
 
-            avg_stat = numpy.sum(stats.to_numpy()[:, append_list[:, m_idx]], axis=1)
+            avg_stat = np.sum(stats.to_numpy()[:, append_list[:, m_idx]], axis=1)
 
         elif m_name == '.Mean_Length':  # weighted by NoS
 
             weight = stats.to_numpy()[:, append_list[:, 1]]
             val = stats.to_numpy()[:, append_list[:, m_idx]]
 
-            val_weighted_sum = numpy.sum(val * weight, axis=1)
-            weight_sum = numpy.sum(weight, axis=1)
+            val_weighted_sum = np.sum(val * weight, axis=1)
+            weight_sum = np.sum(weight, axis=1)
 
-            empty_indices = numpy.where(weight_sum == 0)[0]
+            empty_indices = np.where(weight_sum == 0)[0]
             weight_sum[empty_indices] = 1
 
             avg_stat = val_weighted_sum / weight_sum
@@ -100,18 +100,18 @@ def main():
             weight = stats.to_numpy()[:, append_list[:, 0]]
             val = stats.to_numpy()[:, append_list[:, m_idx]]
 
-            val_weighted_sum = numpy.sum(val.astype(numpy.double) * weight.astype(numpy.double), axis=1)
-            weight_sum = numpy.sum(weight, axis=1)
+            val_weighted_sum = np.sum(val.astype(np.double) * weight.astype(np.double), axis=1)
+            weight_sum = np.sum(weight, axis=1)
 
-            empty_indices = numpy.where(weight_sum == 0)[0]
+            empty_indices = np.where(weight_sum == 0)[0]
             weight_sum[empty_indices] = 1
 
             avg_stat = val_weighted_sum / weight_sum
             avg_stat[empty_indices] = -1
 
         avg_stats.append(avg_stat)
 
-    avg_stats = numpy.array(avg_stats)
+    avg_stats = np.array(avg_stats)
     avg_stats = avg_stats.transpose()
 
     column_names = ['subjectkey']
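The `.Mean_Length` and fallback branches above in wm_average_tract_measures.py compute a fiber-count-weighted average and guard against tracts whose total weight is zero; a self-contained sketch of that guard (toy arrays, hypothetical values):

```python
import numpy as np

# Rows are subjects, columns are the tracts being averaged (toy data).
val = np.array([[2.0, 4.0], [5.0, 7.0]])     # per-tract measure values
weight = np.array([[1.0, 3.0], [0.0, 0.0]])  # per-tract fiber counts (NoS)

val_weighted_sum = np.sum(val * weight, axis=1)
weight_sum = np.sum(weight, axis=1)

# Where all weights are zero, divide by 1 and flag the result with -1,
# mirroring the script's handling of empty tracts.
empty_indices = np.where(weight_sum == 0)[0]
weight_sum[empty_indices] = 1

avg_stat = val_weighted_sum / weight_sum
avg_stat[empty_indices] = -1

print(avg_stat)  # [ 3.5 -1. ]
```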
2 changes: 1 addition & 1 deletion bin/wm_change_nrrd_dir.py
@@ -6,7 +6,7 @@
 import multiprocessing
 import os
 
-import numpy
+import numpy as np
 import vtk
 from joblib import Parallel, delayed

(diff for the remaining 41 changed files not shown)
