imports.py
#
# Copyright (c) University of Luxembourg 2019-2020.
# Created by Hazem FAHMY, [email protected], SNT, 2019.
#
import sys
#globalDir = sys.path
#localDir = sys.path.insert(1, './')
#sys.path = globalDir
import time
import torch
import os
import shutil
import random
import pandas as pd
from os.path import join, basename, isfile, exists, dirname, isdir
from os import makedirs
from scipy.stats import entropy
from scipy.spatial import distance
#import tensorflow.compat.v1 as tf
import tensorflow as tf
import numpy as np
from skimage.segmentation import slic
from skimage.segmentation import felzenszwalb
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage import io
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import sklearn
import configparser
import statistics as stat
import scipy.stats as sc
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl import Workbook
import argparse
import math
from sklearn.model_selection import train_test_split
import sklearn.ensemble
#import wittgenstein as lw
from sklearn.metrics import precision_score, recall_score
from PIL import Image
from torch.autograd import Variable
from operator import itemgetter
import ntpath
from sklearn import metrics
import scipy.cluster.hierarchy as shc
from scipy.spatial.distance import pdist
import cv2
import dlib
import json
import glob
import xlsxwriter
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler
from torch import nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.utils.model_zoo as model_zoo
import torch.optim as optim
import imageio
import torchvision
from tqdm import tqdm
from sklearn.preprocessing import normalize
from sklearn.cluster import AgglomerativeClustering
#import dataSupplier as dS
from sklearn.metrics import pairwise_distances
import subprocess
from shutil import rmtree
import hashlib
from distutils.dir_util import copy_tree
from os import listdir
class ToTensor(object):
    def __call__(self, img):
        # numpy image: C x H x W
        # torch image: C x H x W
        # no transpose needed (img.transpose((0, 1, 2)) would be the identity):
        # all images are preprocessed to 1-channel grayscale
        return torch.from_numpy(img)
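# A minimal usage sketch for ToTensor (the array below is synthetic): it only
# wraps torch.from_numpy, so the input must already be in (C, H, W) layout.
# arr = np.zeros((1, 128, 128), dtype=np.float32)
# t = ToTensor()(arr)
# print(t.shape)  # torch.Size([1, 128, 128])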
class PathImageFolder(datasets.ImageFolder):
    def __getitem__(self, index):
        # this is what ImageFolder normally returns
        original_tuple = super(PathImageFolder, self).__getitem__(index)
        # the image file path
        path = self.imgs[index][0]
        # make a new tuple that includes the original tuple and the path
        tuple_with_path = (original_tuple + (path,))
        return tuple_with_path
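# A minimal usage sketch for PathImageFolder (the "data/train" directory is
# hypothetical): unlike ImageFolder, each sample also carries its file path.
# loader = DataLoader(PathImageFolder("data/train", transform=setupTransformer("AC")),
#                     batch_size=4, shuffle=True)
# for imgs, labels, paths in loader:
#     print(imgs.shape, labels[0].item(), paths[0])
#     break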
def setupTransformer(dataSetName):
    if dataSetName == "ASL" or dataSetName == "AC":
        data_transform = transforms.Compose([
            # transforms.CenterCrop([256, 256]),
            transforms.Resize(256),
            # transforms.RandomResizedCrop(256, ratio=(1.0, 1.0)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    elif dataSetName == "TS":
        data_transform = transforms.Compose([
            # transforms.CenterCrop([256, 256]),
            # transforms.Resize(256),
            # transforms.RandomResizedCrop(256, ratio=(1.0, 1.0)),
            transforms.Resize([256, 256]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    elif dataSetName.startswith("HPD"):
        data_transform = transforms.Compose([
            # transforms.CenterCrop([256, 256]),
            transforms.Resize(128),
            # transforms.RandomResizedCrop(256, ratio=(1.0, 1.0)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    elif dataSetName == "FLD":
        data_transform = transforms.Compose([ToTensor()])
    else:
        data_transform = transforms.Compose([
            transforms.CenterCrop([256, 256]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])
    return data_transform
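# A minimal usage sketch (the image file name is hypothetical): the returned
# torchvision pipeline maps a PIL image to a normalized float tensor.
# transform = setupTransformer("TS")
# img = Image.open("sign.png").convert("RGB")
# tensor = transform(img)  # shape (3, 256, 256) for the "TS" branch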
def load_data(data_path, batchSize, maxNum):
    # NOTE: relies on the dataSupplier module, imported above as dS
    # (currently commented out); this function raises NameError until
    # that import is restored.
    print(data_path)
    data_supplier = dS.Data(data_path, batchSize, True, False, maxNum)
    print(data_supplier)
    print(data_supplier.get_data_iters())
    return data_supplier.get_data_iters()
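# A hypothetical call sketch, assuming the dataSupplier import above is restored
# (the "./data" path is illustrative):
# iters = load_data("./data", batchSize=64, maxNum=0)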
def getIEEData(f_path, max_num=0, shuffle=True):
    dataset = np.load(f_path, allow_pickle=True)
    dataset = dataset.item()
    x_data = dataset["data"]
    if max_num > 0:
        x_data = x_data[:max_num]
    x_data = x_data.astype(np.float32)
    x_data = x_data / 255.
    # x_data = x_data.reshape((-1, 1, x_data.shape[-2], x_data.shape[-1]))
    x_data = x_data[:, np.newaxis]  # add a channel axis: (N, H, W) -> (N, 1, H, W)
    # print("x_data shape: ", x_data.shape)
    y_data = dataset["label"]
    if max_num > 0:
        y_data = y_data[:max_num]
    y_data = y_data.astype(np.float32)
    if shuffle:
        # apply the same random permutation to images and labels
        r_idx = np.random.permutation(x_data.shape[0])
        x_data = x_data[r_idx]
        y_data = y_data[r_idx]
    x_data = torch.from_numpy(x_data)
    y_data = torch.from_numpy(y_data)
    return x_data, y_data
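# A minimal usage sketch (the .npy file name is hypothetical): the file is
# expected to hold a pickled dict with "data" (N x H x W images) and "label" arrays.
# x, y = getIEEData("ieetr.npy", max_num=1000)
# print(x.shape, y.shape)  # e.g. torch.Size([1000, 1, H, W])
# loader = DataLoader(TensorDataset(x, y), batch_size=64, shuffle=True)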