This repository has been archived by the owner on Jul 5, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
/
useModel.py
46 lines (39 loc) · 1.59 KB
/
useModel.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from conversion import spectrogram_to_audio, spectrogram_to_image
from tqdm import trange
from configparser import ConfigParser
def Main():
    """Run a trained Keras model over every spectrogram under ``_use/``.

    Reads ``config.ini`` for the model path (``MISC.modelName``), the
    verbosity flag (``MISC.verbose``) and the slice width
    (``Structure.sliceSize``).  For each subdirectory of ``_use/`` it
    feeds each ``.npy`` spectrogram through the model one slice at a
    time, then writes the converted spectrogram (``.npy``), an optional
    preview image, and reconstructed audio into ``output/``.
    """
    config = ConfigParser()
    config.read('config.ini')
    modelName = config['MISC']['modelName']
    # getboolean replaces the original eval(): eval on a config string
    # executes arbitrary code; getboolean safely accepts true/false,
    # 1/0, yes/no, on/off (any case).
    verbose = config.getboolean('MISC', 'verbose')
    reconstructed_model = keras.models.load_model(modelName)
    if verbose:
        print(reconstructed_model.get_weights())
    sliceSize = int(config['Structure']['sliceSize'])
    # 'dirname'/'fname' avoid shadowing the built-ins dir and file.
    for dirname in os.listdir('_use'):
        tmp = []
        for fname in os.listdir(f'_use/{dirname}'):
            if fname.split('.')[-1] == 'npy':
                test_input = np.load(f'_use/{dirname}/{fname}')
                if verbose:
                    print('Input shape:')
                    print(test_input.shape)
                # Non-overlapping windows of sliceSize frames along axis 1;
                # any trailing remainder shorter than one slice is dropped.
                for i in trange(test_input.shape[1] // sliceSize, unit='spec'):
                    window = test_input[:, i * sliceSize:(i + 1) * sliceSize]
                    out = reconstructed_model.predict(np.array([window.flatten()]))
                    # Undo output scaling — presumably training targets were
                    # mapped into [0, 1]; TODO confirm against the trainer.
                    out -= .5
                    out *= 10
                    # 257 frequency bins (n_fft//2 + 1 for a 512-point FFT,
                    # presumably — verify against the training data).
                    out.shape = 257, sliceSize
                    tmp.append(out)
        if not tmp:
            # No usable .npy input in this directory: skip it instead of
            # crashing on tmp[0] (IndexError) as the original did.
            continue
        # One concatenate over the whole list is O(total), replacing the
        # original O(n^2) pairwise-concatenate loop.
        final = np.concatenate(tmp, axis=1)
        if verbose:
            spectrogram_to_image(final, f'output/{dirname}Converted')
        np.save(f'output/{dirname}Converted.npy', final)
        spectrogram_to_audio(final, f'output/{dirname}Converted' + '.wav', 64, 22050)