
Commit 5587ff7

cleanup inspection codes
1 parent b6c9cbc commit 5587ff7

File tree

2 files changed: +2 -197 lines

inspection.ipynb (-145 lines)

This file was deleted.

inspection.py (+2 -52 lines)
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-# coding: utf-8
-
 # # Comparison of Inverse-STFT implementations
 # - Seungwon Park's implementation: IFFT + deconvolution for stacking `ytmp`
 # - Keunwoo Choi's implementation: based on IRFFT
@@ -26,8 +23,10 @@ def inner(*args,**kwargs):
         time = start.elapsed_time(end)
         return output,time
     return inner
+
 istft_irfft = timing(istft_irfft)
 istft_deconv = timing(istft_deconv)
+
 def test_stft():
     import traceback
     audio, sr = librosa.load(librosa.util.example_audio_file(), duration=2,sr=None)
@@ -60,55 +59,6 @@ def to_np(tensor):
         # print(traceback.print_exc())
 
 
-
-
-
-
-
 if __name__ == "__main__":
     test_stft()
-=======
-# In[3]:
-
-
-y, sr = librosa.load(librosa.util.example_audio_file(), duration=2.0)
-n_fft = 2048
-hop_length = n_fft // 4
-y = torch.tensor(y)
-stft = torch.stft(y, n_fft, hop_length)
-
-
-# In[4]:
-
-
-stft_single = stft
-stft_batch = stft.unsqueeze(0)
-
-
-# In[5]:
-
-
-result_deconv = istft_deconv(stft_single, hop_length)
-result_irfft = istft_irfft(stft_batch, hop_length)[0]
-
-diff = torch.max(torch.abs(result_deconv - result_irfft)).item()
-
-if diff < 1e-4:
-    print(f'Results are consistent. Maximum difference: {diff}')
-
-
-# In[6]:
-
-
-get_ipython().run_line_magic('timeit', 'result_deconv = istft_deconv(stft_single, hop_length)')
-
-
-# In[7]:
-
-
-get_ipython().run_line_magic('timeit', 'result_irfft = istft_irfft(stft_batch, hop_length)[0]')
-
-
-# # Conclusion
-# - IRFFT-based implementation is faster, showing that better parallelization doesn't outspeed algorithmic optimization.
 
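Note: the context lines in the second hunk (`start.elapsed_time(end)`, `return output,time`, `return inner`, and the `istft_irfft = timing(istft_irfft)` wrapping) indicate that `timing` is a decorator built on CUDA events, but its body is not part of this diff. The following is only a minimal sketch of that pattern, assuming the standard `torch.cuda.Event` usage rather than the repository's exact code.

import torch

def timing(fn):
    # Minimal sketch of a CUDA-event timing decorator (reconstructed, not the
    # repository's exact implementation): returns (output, elapsed_ms).
    def inner(*args, **kwargs):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        output = fn(*args, **kwargs)
        end.record()
        torch.cuda.synchronize()        # wait for the recorded work to finish
        time = start.elapsed_time(end)  # elapsed GPU time in milliseconds
        return output, time
    return inner

Wrapped as in the diff (`istft_irfft = timing(istft_irfft)`), each call then returns a `(waveform, milliseconds)` pair, which matches the `return output,time` context line.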

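Note: the file's header comments contrast an IFFT + deconvolution inverse STFT with an IRFFT-based one, and the deleted conclusion states that the IRFFT variant is faster. Neither implementation appears in this diff; the sketch below only illustrates the general IRFFT + overlap-add idea using the current `torch.fft` API. The function name, the Hann window, and the normalization and trimming choices are assumptions, not taken from the benchmarked code.

import torch

def istft_irfft_sketch(spec, hop_length, n_fft=None, window=None):
    # spec: complex tensor of shape (n_fft // 2 + 1, n_frames), e.g. from
    # torch.stft(..., return_complex=True) with center=True.
    n_freq, n_frames = spec.shape
    if n_fft is None:
        n_fft = 2 * (n_freq - 1)
    if window is None:
        window = torch.hann_window(n_fft)  # assumed window choice

    # One inverse real FFT per frame: (n_frames, n_freq) -> (n_frames, n_fft).
    frames = torch.fft.irfft(spec.transpose(0, 1), n=n_fft)

    # Weighted overlap-add with squared-window normalization.
    length = n_fft + hop_length * (n_frames - 1)
    y = torch.zeros(length)
    win_sum = torch.zeros(length)
    for t in range(n_frames):
        s = t * hop_length
        y[s:s + n_fft] += frames[t] * window
        win_sum[s:s + n_fft] += window ** 2
    y = y / win_sum.clamp(min=1e-8)

    # Drop the n_fft // 2 samples of padding added on each side by center=True.
    return y[n_fft // 2:-(n_fft // 2)]

In practice the per-frame loop is usually replaced by a vectorized overlap-add (e.g. with `torch.nn.functional.fold`); the loop is kept here only for clarity.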
0 commit comments