# helperFunctions.py
# Forked from FAR-Lab/RealtimeAudioClassification.

from __future__ import print_function
import csv
import numpy as np
import random
import librosa
import wave
import os
import matplotlib.pyplot as plt
from matplotlib import cm
import pickle
from ipywidgets import interact, interactive
import ipywidgets as widgets
from PIL import Image
import IPython.display as ipd
from IPython.display import display  # explicit import so display() also works outside a live notebook
import glob
import sys
import torch  # required by the SpectrumDataset class in the TRAINING branch below


def imshow(img):
    """Display a normalized torch image tensor with matplotlib."""
    img = img / 2 + 0.5  # undo the (-1, 1) normalization
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))  # CHW -> HWC for matplotlib
    plt.show()
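
# A typical call (a sketch; torchvision and the `images` batch are assumptions):
#   import torchvision
#   imshow(torchvision.utils.make_grid(images))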


def listdir_nohidden(path):
    """List a directory's entries; glob with '*' skips hidden dotfiles."""
    return glob.glob(os.path.join(path, '*'))


def GenerateSpectrums(MainFile):
    """Turn one audio file into a list of square mel-spectrogram image tiles."""
    # Read the spectrogram parameters from the shared CSV
    # (path kept exactly as the original references it, spelling included).
    SpectrumVariables = {}
    with open('../SpectrumVarialbes.csv', newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            for k in row:
                SpectrumVariables[k] = int(row[k])
    # Load as mono, then resample to the configured rate
    # (keyword arguments keep this compatible with newer librosa releases).
    x, sample_rate_in = librosa.load(MainFile, mono=True)
    audio_data = librosa.resample(x, orig_sr=sample_rate_in,
                                  target_sr=SpectrumVariables['SAMPLE_RATE'])
    mel_spec_power = librosa.feature.melspectrogram(
        y=audio_data, sr=SpectrumVariables['SAMPLE_RATE'],
        n_fft=SpectrumVariables['N_FFT'],
        hop_length=SpectrumVariables['HOP_LENGTH'],
        n_mels=SpectrumVariables['N_MELS'],
        power=SpectrumVariables['POWER'],
        fmin=SpectrumVariables['FMIN'],
        fmax=SpectrumVariables['FMAX'])
    # Convert to dB, normalize to [0, 1], and render through a colormap (RGB only).
    mel_spec_db = np.float32(librosa.power_to_db(mel_spec_power, ref=np.max))
    mel_spec_db -= mel_spec_db.min()
    mel_spec_db /= mel_spec_db.max()
    im = np.uint8(cm.gist_earth(mel_spec_db) * 255)[:, :, :3]
    # Slice the image into non-overlapping, RESOLUTION-wide tiles.
    ArrayofPictures = []
    RESOLUTION = SpectrumVariables['RESOLUTION']
    for i in range(int(np.floor(im.shape[1] / RESOLUTION))):
        startx = RESOLUTION * i
        stopx = RESOLUTION * (i + 1)
        ArrayofPictures.append(im[:, startx:stopx, :])
    return ArrayofPictures
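
# Minimal usage sketch (the audio path is hypothetical; anything librosa can
# decode should work):
#   tiles = GenerateSpectrums('../AudioData/dog/bark-01.wav')
#   print(len(tiles), tiles[0].shape)  # roughly (N_MELS, RESOLUTION, 3) per tile
#   plt.imshow(tiles[0]); plt.axis('off'); plt.show()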


def log_mel_spec_tfm(dataInput):
    """Render one audio file (src) into PNG spectrogram tiles under dst."""
    src_path = dataInput[0]
    dst_path = dataInput[1]
    print('Starting on', os.path.split(src_path)[1])
    pictures = GenerateSpectrums(src_path)
    print(len(pictures))
    fname = os.path.split(src_path)[-1]
    count = 0
    for pic in pictures:
        # Swap the audio extension for '-<tile index>.png' in the output name.
        outname = (fname.replace(".flac", '-').replace(".aif", '-')
                   .replace(".wav", '-').replace(".m4a", '-')
                   .replace(".mp3", '-') + str(count) + '.png')
        plt.imsave(os.path.join(dst_path, outname), pic)
        count += 1
    if count == 0:
        # The file was too short to produce even one full-width tile.
        print(src_path)
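
# Example call (hypothetical paths; the destination folder must already exist):
#   log_mel_spec_tfm(('../AudioData/dog/bark-01.wav', '../GeneratedData/dog'))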


# 'Type' is expected to be defined by whatever notebook runs this file;
# if it is missing, assume the script was launched from the command line.
try:
    Type
except NameError:
    if len(sys.argv) > 2:  # need both a source and a target argument
        print("Found arguments, will start converting")
        source = str(sys.argv[1])
        target = str(sys.argv[2])
        log_mel_spec_tfm((source, target))
else:
    if Type == "INTERFACE":
        SOURCE_DATA_ROOT = '../AudioData/'
        style = {'description_width': 'initial'}
        ClassSelection = widgets.Dropdown(options=listdir_nohidden(SOURCE_DATA_ROOT),
                                          description='Source for Training Data:', style=style)
        FileSelection = widgets.Dropdown(description='Audio file to visualize', style=style)

        def updateLocation(*args):
            # Refresh the file dropdown whenever a new class folder is selected.
            FileSelection.options = listdir_nohidden(os.path.join(SOURCE_DATA_ROOT, ClassSelection.value))

        ClassSelection.observe(updateLocation)
        display(ClassSelection)
        display(FileSelection)
        updateLocation()
    elif Type == "TRAINING":
        SPECTRUM_IMAGES_ROOT = "../GeneratedData/"

        class SpectrumDataset(torch.utils.data.Dataset):
            """Spectrogram-image dataset: every .png tile in one class folder."""
            def __init__(self, ClassName, root_dir, transform=None):
                """
                Args:
                    ClassName: label returned alongside every sample.
                    root_dir (string): Directory with all the images.
                    transform (callable, optional): Optional transform to be
                        applied on a sample.
                """
                self.root_dir = root_dir
                self.ClassName = ClassName
                self.fileList = [f for f in os.listdir(root_dir) if f.endswith('.png')]
                print(root_dir, len(self.fileList))
                self.transform = transform

            def ReduceSize(self, ItemCount):
                # Shrink the dataset; note random.choices samples WITH replacement.
                self.fileList = random.choices(self.fileList, k=ItemCount)

            def __len__(self):
                return len(self.fileList)

            def __getitem__(self, idx):
                if torch.is_tensor(idx):
                    idx = idx.tolist()
                img_path = os.path.join(self.root_dir, self.fileList[idx])
                image = Image.open(img_path)
                image = image.convert('RGB')  # drop any alpha channel
                if self.transform:
                    image = self.transform(image)
                return image, self.ClassName
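
        # Minimal sketch of feeding SpectrumDataset into a DataLoader (the 'dog'
        # folder name and the transform pipeline are assumptions):
        #   from torchvision import transforms
        #   tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
        #   ds = SpectrumDataset('dog', os.path.join(SPECTRUM_IMAGES_ROOT, 'dog'), transform=tfm)
        #   loader = torch.utils.data.DataLoader(ds, batch_size=16, shuffle=True)
        #   images, labels = next(iter(loader))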

        classes = [os.path.split(c)[1] for c in listdir_nohidden(SPECTRUM_IMAGES_ROOT)]
        widgetDict = {}
        print("Select classes to use for training:")
        for c in classes:
            widgetDict[c] = widgets.Checkbox(
                value=False,
                description=c,
                disabled=False,
                indent=False)
            display(widgetDict[c])
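
# How this file is meant to be driven (a sketch; the exact invocation used by
# the course notebooks is an assumption):
#   in a notebook cell:  Type = "INTERFACE"   # or "TRAINING"
#                        %run -i helperFunctions.py
#   from a shell:        python helperFunctions.py <source-audio-file> <destination-folder>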