-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcustomLoader.py
325 lines (294 loc) · 12.8 KB
/
customLoader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
def get_all_subdirectories(root_folder):
    """Return the absolute path of every directory nested under *root_folder*.

    The root itself is excluded; all deeper levels are included, in the
    top-down order produced by ``os.walk``. A nonexistent root yields [].
    """
    found = []
    for dirpath, _dirnames, _filenames in os.walk(root_folder):
        if dirpath == root_folder:
            continue  # skip the root folder itself
        found.append(os.path.abspath(dirpath))
    return found
class MultiDataset(Dataset):
    """Dataset that, for one sample index, loads the matching CSV from every
    "day" sub-folder at once and stacks them along dim 0.

    Assumed layout (inferred from the path arithmetic below — confirm against
    the actual data tree):
        root_dir/<day folder>/<left>_<right>/.../*.csv
    where each direct child of ``root_dir`` is one day and the class folders
    are named ``"<left>_<right>"`` with two float labels.
    """
    def __init__(self, root_dir):
        self.root_dir = root_dir
        # Each direct sub-folder of the root represents one day of data.
        self.subfolders = [f.path for f in os.scandir(root_dir) if f.is_dir()]
        # Number of days.
        self.dayNum = len(self.subfolders)
        #print('dayNum',self.dayNum)
        #print(self.classNum)
        # Flat list of per-class CSV path lists: dayNum * classNum entries,
        # grouped day-major (all classes of day 0 first, then day 1, ...).
        self.file_paths = self.get_file_paths()
        #print(len(self.file_paths))
        #print(self.file_paths)
        #print('len of file paths',len(self.file_paths))
        #print('len of [0]',len(self.file_paths[0]))
        #print('[0][0]',self.file_paths[3][1])
        # Number of classes (per-day group count).
        self.classNum = len(self.file_paths)//self.dayNum
        #print(self.classNum)
    def __len__(self):
        # NOTE(review): assumes every class folder holds as many CSVs as the
        # first one (only len(self.file_paths[0]) is ever used) — confirm.
        total_length = 0
        for i in range(self.classNum):
            total_length += len(self.file_paths[0])
        return total_length
    def __getitem__(self, idx):
        #print('idx==',idx)
        left_labels = []
        right_labels = []
        datas = []
        index = idx
        # NOTE(review): `start` is never used in this method.
        start = 0
        for i in range(self.dayNum):
            # Total number of samples within one day.
            # NOTE(review): oneClassTotal is computed but never used.
            #print('i==',i)
            oneClassTotal = self.classNum*len(self.file_paths[0])
            #print('oneClassTotal',oneClassTotal)
            #print('index',index)
            # Map the flat sample index to (class group of day i, file within
            # that class): same class/file position picked for every day.
            index0 = index//len(self.file_paths[0])+i*self.classNum
            index1 = index%len(self.file_paths[0])
            file_path = self.file_paths[index0][index1]
            #print(file_path)
            # Extract the labels from the immediate parent folder name.
            folder_name = os.path.basename(os.path.dirname(file_path))
            #print('folder_name==',folder_name)
            #print(folder_name)
            left_label, right_label = map(float, folder_name.split('_'))
            left_labels.append(torch.tensor(left_label))
            right_labels.append(torch.tensor(right_label))
            data = np.loadtxt(file_path, delimiter=",", dtype=np.float32)
            # Further processing could be done here, e.g. normalisation.
            data = torch.from_numpy(data).float()
            datas.append(data)
        # Return the per-day data stacked along dim 0, with matching labels.
        datas = torch.stack(datas)
        left_labels = torch.stack(left_labels)
        right_labels = torch.stack(right_labels)
        return datas, left_labels, right_labels
    def get_file_paths(self):
        """Collect one list of CSV paths per (day, class) sub-folder.

        Iterates day folders in ``self.subfolders`` order; within each day,
        every nested sub-directory contributes one list of its ``*.csv``
        files (day-major ordering relied on by ``__getitem__``).
        """
        file_paths = []
        for cur_dir in self.subfolders:
            #print(cur_dir)
            for sub_dir in get_all_subdirectories(cur_dir):
                path = []
                #print(sub_dir)
                for root, dirs, files in os.walk(sub_dir):
                    for file in files:
                        if file.endswith(".csv"):
                            path.append(os.path.join(root, file))
                file_paths.append(path)
        return file_paths
#Direct multi-class classification (one sample per CSV, no per-day stacking)
class SimpleMultiDataset(Dataset):
    """Flat variant of MultiDataset: each index yields a single CSV sample
    (data tensor plus the two float labels parsed from its parent folder
    name ``"<left>_<right>"``), instead of stacking across days.
    """
    def __init__(self, root_dir):
        self.root_dir = root_dir
        # Each direct sub-folder of the root represents one day of data.
        self.subfolders = [f.path for f in os.scandir(root_dir) if f.is_dir()]
        # Number of days.
        self.dayNum = len(self.subfolders)
        #print('dayNum',self.dayNum)
        #print(self.classNum)
        # Flat, day-major list of per-class CSV path lists.
        self.file_paths = self.get_file_paths()
        #print(len(self.file_paths))
        #print(self.file_paths)
        #print('len of file paths',len(self.file_paths))
        #print('len of [0]',len(self.file_paths[0]))
        #print('[0][0]',self.file_paths[3][1])
        # Number of classes (per-day group count).
        self.classNum = len(self.file_paths)//self.dayNum
        #print(self.classNum)
    def __len__(self):
        # NOTE(review): assumes every class folder holds len(file_paths[0])
        # CSVs; total is classNum * that, times the number of days — confirm.
        total_length = 0
        for i in range(self.classNum):
            total_length += len(self.file_paths[0])
        return total_length*self.dayNum
    def __getitem__(self, idx):
        index=idx
        # NOTE(review): oneClassTotal is computed but never used.
        oneClassTotal = self.classNum*len(self.file_paths[0])
        #print('oneClassTotal',oneClassTotal)
        # NOTE(review): index0 uses `%` over len(file_paths) while index1 uses
        # `%` over the group size — unless those lengths are coprime-aligned,
        # some (group, file) pairs are never visited and others repeat.
        # Looks like index0 may have been meant as index // len(file_paths[0]);
        # verify against the original training setup before changing.
        index0 = index%len(self.file_paths)
        index1 = index%len(self.file_paths[0])
        file_path = self.file_paths[index0][index1]
        #print(file_path)
        # Extract the labels from the immediate parent folder name.
        folder_name = os.path.basename(os.path.dirname(file_path))
        left_label, right_label = map(float, folder_name.split('_'))
        data = np.loadtxt(file_path, delimiter=",", dtype=np.float32)
        # Further processing could be done here, e.g. normalisation.
        data = torch.from_numpy(data).float()
        # Return the data and its two labels.
        return data, left_label, right_label
    def get_file_paths(self):
        """Collect one list of CSV paths per (day, class) sub-folder,
        day-major, mirroring MultiDataset.get_file_paths."""
        file_paths = []
        for cur_dir in self.subfolders:
            #print(cur_dir)
            for sub_dir in get_all_subdirectories(cur_dir):
                path = []
                #print(sub_dir)
                for root, dirs, files in os.walk(sub_dir):
                    for file in files:
                        if file.endswith(".csv"):
                            path.append(os.path.join(root, file))
                file_paths.append(path)
        return file_paths
#Dataset used for the confounding-factor experiments
class CustomDataset(Dataset):
    """One sample per CSV file found anywhere under ``root_dir``.

    The name of each CSV's immediate parent folder encodes the two float
    labels as ``"<left>_<right>"``. ``__getitem__`` returns
    ``(tensor, left_label, right_label)``.
    """
    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.file_paths = self.get_file_paths()

    def __len__(self):
        return len(self.file_paths)

    def __getitem__(self, idx):
        csv_path = self.file_paths[idx]
        # Labels are encoded in the immediate parent directory name.
        parent_name = os.path.basename(os.path.dirname(csv_path))
        left_label, right_label = (float(part) for part in parent_name.split('_'))
        # CSV -> float32 ndarray -> float tensor; further processing
        # (e.g. normalisation) could be added here.
        array = np.loadtxt(csv_path, delimiter=",", dtype=np.float32)
        tensor = torch.from_numpy(array).float()
        return tensor, left_label, right_label

    def get_file_paths(self):
        """Collect every ``*.csv`` path under the root directory, top-down."""
        return [
            os.path.join(root, name)
            for root, _dirs, names in os.walk(self.root_dir)
            for name in names
            if name.endswith(".csv")
        ]
# Dataset for the End2End model: data is stored as [day, data]; only left_label and right_label are returned
class End2EndCustomDataset(Dataset):
    """Dataset for the End2End model: one index yields the matching CSV from
    every day, stacked as [day, data], plus per-day left/right labels.

    Assumed layout (inferred from the path arithmetic — confirm against the
    data tree): root_dir/<left>_<right>/<day-level dir>/.../*.csv, i.e. class
    folders are the direct children here, the opposite nesting of
    MultiDataset.
    """
    def __init__(self, root_dir):
        self.root_dir = root_dir
        # Each direct sub-folder of the root is one class folder.
        self.subfolders = [f.path for f in os.scandir(root_dir) if f.is_dir()]
        # Number of classes.
        self.classNum = len(self.subfolders)
        #print(self.classNum)
        # Flat list of per-(class, day) CSV path lists, class-major.
        self.file_paths = self.get_file_paths()
        #print(len(self.file_paths))
        # Number of days.
        self.dayNum = len(self.file_paths)//self.classNum
        #print(len(self.file_paths[0]))
        #print(self.dayNum)
    def __len__(self):
        # NOTE(review): assumes every group holds len(file_paths[0]) CSVs —
        # only the first group's length is ever consulted; confirm.
        total_length = 0
        for i in range(self.classNum):
            total_length += len(self.file_paths[0])
        return total_length
    def __getitem__(self, idx):
        #print('idx==',idx)
        left_labels = []
        right_labels = []
        datas = []
        index = idx
        start = 0
        # First loop: locate which class the flat index falls into, leaving
        # `index` as the offset within that class's first-day group and
        # `start` as that class's base offset into file_paths.
        # NOTE(review): the loop bound is dayNum but the step is classNum —
        # this only partitions correctly if group sizes line up; verify.
        for i in range(self.dayNum):
            if (index-len(self.file_paths[i]))<0:
                index = index
                break
            else:
                index = index-len(self.file_paths[i])
                start += self.classNum
        # Second loop: collect the file at `index` from each day's group of
        # the located class.
        for i in range(self.dayNum):
            #print('start+i==',start+i)
            #print('index==',index)
            file_path = self.file_paths[start+i][index]
            #print(file_path)
            # Labels come from the grandparent folder name "<left>_<right>".
            folder_name = os.path.basename(os.path.dirname(os.path.dirname(file_path)))
            #print(folder_name)
            left_label, right_label = map(float, folder_name.split('_'))
            left_labels.append(torch.tensor(left_label))
            right_labels.append(torch.tensor(right_label))
            data = np.loadtxt(file_path, delimiter=",", dtype=np.float32)
            # Further processing could be done here, e.g. normalisation.
            data = torch.from_numpy(data).float()
            datas.append(data)
        # Return the per-day data stacked along dim 0, with matching labels.
        datas = torch.stack(datas)
        left_labels = torch.stack(left_labels)
        right_labels = torch.stack(right_labels)
        return datas, left_labels, right_labels
    def get_file_paths(self):
        """Collect one list of CSV paths per nested sub-folder of each class
        folder, class-major (ordering relied on by ``__getitem__``)."""
        file_paths = []
        for cur_dir in self.subfolders:
            #print(cur_dir)
            for sub_dir in get_all_subdirectories(cur_dir):
                path = []
                #print(sub_dir)
                for root, dirs, files in os.walk(sub_dir):
                    for file in files:
                        if file.endswith(".csv"):
                            path.append(os.path.join(root, file))
                file_paths.append(path)
        return file_paths
# Day-robustness dataset: data is stored as [day, data]; only left_label is returned (right_label is not needed)
class NewCustomDataset(Dataset):
    """Day-robustness dataset: each index yields data stacked over half of
    the path groups as [day, data] plus per-day left labels only.

    Assumed layout (inferred from the code — confirm): the root holds two
    top-level folders (e.g. "0" and "1"); samples with ``idx`` below the
    first group's size come from the first half of ``file_paths``, the rest
    from the second half.
    """
    def __init__(self, root_dir):
        self.root_dir = root_dir
        # The root is expected to contain two sub-folders (e.g. 0 and 1).
        self.subfolders = [f.path for f in os.scandir(root_dir) if f.is_dir()]
        print(self.subfolders)
        self.file_paths = self.get_file_paths()
    def __len__(self):
        # NOTE(review): requires at least two path groups; only the first
        # two groups' sizes contribute to the length — confirm this matches
        # the intended layout.
        return len(self.file_paths[0])+len(self.file_paths[1])
    def __getitem__(self, idx):
        left_labels = []
        datas = []
        if idx<len(self.file_paths[0]):
            # Index falls in the first half: read file `idx` from each group
            # in the first half of file_paths.
            for i in range(len(self.file_paths)//2):
                file_path = self.file_paths[i][idx]
                #print(file_path)
                # Labels come from the grandparent folder name "<left>_<right>";
                # only the left label is kept.
                folder_name = os.path.basename(os.path.dirname(os.path.dirname(file_path)))
                #print(folder_name)
                left_label, right_label = map(float, folder_name.split('_'))
                left_labels.append(torch.tensor(left_label))
                data = np.loadtxt(file_path, delimiter=",", dtype=np.float32)
                # Further processing could be done here, e.g. normalisation.
                data = torch.from_numpy(data).float()
                datas.append(data)
        else:
            # Index falls in the second half: read from the second half of
            # the groups.
            # NOTE(review): the wrapped index is idx % (len(file_paths)//2),
            # i.e. modulo the number of groups, not the group size — looks
            # like it may have been meant as idx - len(file_paths[0]); verify.
            for i in range(len(self.file_paths)//2):
                file_path = self.file_paths[i+(len(self.file_paths)//2)][idx%(len(self.file_paths)//2)]
                #print(file_path)
                # Labels come from the grandparent folder name.
                folder_name = os.path.basename(os.path.dirname(os.path.dirname(file_path)))
                #print(folder_name)
                left_label, right_label = map(float, folder_name.split('_'))
                left_labels.append(torch.tensor(left_label))
                data = np.loadtxt(file_path, delimiter=",", dtype=np.float32)
                # Further processing could be done here, e.g. normalisation.
                data = torch.from_numpy(data).float()
                datas.append(data)
        # Return the per-day data stacked along dim 0 with the left labels.
        datas = torch.stack(datas)
        left_labels = torch.stack(left_labels)
        return datas, left_labels
    def get_file_paths(self):
        """Collect one list of CSV paths per nested sub-folder of each
        top-level folder (ordering relied on by ``__getitem__``)."""
        file_paths = []
        for cur_dir in self.subfolders:
            #print(cur_dir)
            for sub_dir in get_all_subdirectories(cur_dir):
                path = []
                #print(sub_dir)
                for root, dirs, files in os.walk(sub_dir):
                    for file in files:
                        if file.endswith(".csv"):
                            path.append(os.path.join(root, file))
                file_paths.append(path)
        return file_paths
# Example usage
# root_directory = "D:\\frequencyProcess\\Multi\\tr"
# root_directory = "D:\\frequencyProcess\\testC"
# dataset = SimpleMultiDataset(root_dir=root_directory)
# print(len(dataset))
# dataloader = DataLoader(dataset, batch_size=1, shuffle=True)
# # Iterate over the data loader
# for batch in dataloader:
# data, left_label,right_label = batch
# # # Process each batch of data and labels here
# print(len(data)) # Prints the size of the batch data (assumed to come from CSV files)
# print(left_label) # Print the labels of the left and right devices
# print(right_label)