This repository has been archived by the owner on Jul 5, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
/
trainModel.py
110 lines (89 loc) · 3.83 KB
/
trainModel.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from conversion import spectrogram_to_image
from configparser import ConfigParser
def get_model(sliceSize, hiddenLayers, lossFunc, learningRate):
    """Build and compile a fully-connected spectrogram-to-spectrogram model.

    Args:
        sliceSize: number of spectrogram frames per training sample; the
            flattened input/output width is 257 * sliceSize (257 frequency
            bins — presumably n_fft=512; TODO confirm against conversion.py).
        hiddenLayers: iterable of hidden Dense layer sizes, in order.
        lossFunc: loss identifier/callable passed straight to model.compile.
        learningRate: Adam learning rate.

    Returns:
        A compiled keras.Model mapping flattened slices to flattened slices
        (sigmoid output, so targets are expected in [0, 1]).
    """
    global verbose  # set by Main() from config.ini before this is called
    flat = 257 * sliceSize
    inputs = keras.Input(shape=(flat,))
    # BatchNormalization directly on the input acts as learned input scaling.
    x = keras.layers.BatchNormalization(momentum=0.8)(inputs)
    for i, size in enumerate(hiddenLayers):
        x = keras.layers.Dense(
            size,
            activation=tf.nn.leaky_relu,
            name=f"dense_{i+1}",
            kernel_initializer="random_normal",
        )(x)
    outputs = keras.layers.Dense(flat, activation="sigmoid")(x)
    model = keras.Model(inputs, outputs)
    # FIX: the `lr` keyword was deprecated and then removed from Keras
    # optimizers; `learning_rate` is the supported name.
    opt = keras.optimizers.Adam(learning_rate=learningRate)
    model.compile(optimizer=opt, loss=lossFunc)
    if verbose:
        print(model.get_weights())
    return model
def Main():
    """Load config.ini, assemble training pairs from _training/, and train.

    Slices every matching input/output spectrogram pair (.npy files) into
    flattened windows of 257*sliceSize values, then fits a new or previously
    saved model. The model is saved even when training is interrupted with
    Ctrl-C.
    """
    global verbose
    config = ConfigParser()
    config.read('config.ini')
    modelName = config['MISC']['modelName']
    # FIX: was eval(...), which executes arbitrary code from the config file.
    # getboolean safely accepts true/false/yes/no/on/off/1/0.
    verbose = config.getboolean('MISC', 'verbose')
    sliceSize = int(config['Structure']['sliceSize'])
    hiddenLayers = [int(i) for i in config['Structure']['hiddenLayers'].split(',')]
    learningRate = float(config['Advanced']['learningRate'])
    lossFunc = config['Advanced']['lossFunc']
    batchSize = int(config['Advanced']['batchSize'])
    print('\n====================')
    print('Loaded from configuration file:\n')
    print('MISC:')
    print('\tModel Name:', modelName)
    print('Structure:')
    print('\tSlice Size:', sliceSize)
    print('\tHidden Layers:', hiddenLayers)
    print('Advanced:')
    print('\tLearning Rate:', learningRate)
    print('\tLoss Function:', lossFunc)
    print('Batch Size:', batchSize)
    print('====================\n')
    # Build the training arrays.
    inputData = []
    targetData = []
    # FIX: os.listdir order is arbitrary (filesystem-dependent); sort both
    # sides so input datasets pair deterministically with their targets.
    for inputDataset, targetDataset in zip(sorted(os.listdir('_training/input')),
                                           sorted(os.listdir('_training/output'))):
        iterator = zip(
            sorted(file for file in
                   os.listdir(f'_training/input/{inputDataset}')
                   if file.split('.')[-1] == 'npy'),
            sorted(file for file in
                   os.listdir(f'_training/output/{targetDataset}')
                   if file.split('.')[-1] == 'npy')
        )
        for inSegment, outSegment in iterator:
            loadIn = np.load(f'_training/input/{inputDataset}/{inSegment}')
            # NOTE(review): this computes x -> x*x/x.max() (squares then
            # scales). If plain peak normalization was intended it should be
            # `loadIn /= loadIn.max()` — kept as-is to preserve behavior.
            loadIn *= loadIn/loadIn.max()
            if loadIn.shape[1] < sliceSize:
                print(f'Dataset "{inputDataset}" will be omitted from training because it is too short.')
                continue
            loadTarget = np.load(f'_training/output/{targetDataset}/{outSegment}')
            loadTarget *= loadTarget/loadTarget.max()
            if loadTarget.shape[1] < sliceSize:
                print(f'Dataset "{targetDataset}" will be omitted from training because it is too short.')
                continue
            # Chop both spectrograms into aligned windows of sliceSize frames;
            # the shorter of the two limits how many windows are taken.
            for i, j in zip(range(np.size(loadIn, 1)//sliceSize), range(np.size(loadTarget, 1)//sliceSize)):
                a = loadIn[:, i*sliceSize:(i+1)*sliceSize]
                b = loadTarget[:, j*sliceSize:(j+1)*sliceSize]
                inputData.append(a.flatten())
                targetData.append(b.flatten())
    inputData = np.asarray(inputData)
    targetData = np.asarray(targetData)
    # Resume training if a saved model directory already exists.
    if os.path.isdir(modelName):
        model = keras.models.load_model(modelName)
    else:
        model = get_model(sliceSize, hiddenLayers, lossFunc, learningRate)
    try:
        # Keyword args instead of positional (same values as before):
        # effectively "train until interrupted" via a huge epoch count.
        model.fit(inputData, targetData, batch_size=batchSize, epochs=10000)
    except KeyboardInterrupt:
        pass
    finally:
        print('\n\n/////////////////////////\n')
        print("DO NOT CLOSE -- MODEL SAVING!!!")
        print('\n/////////////////////////')
        model.save(modelName)