# main.py
import os
from collections import Counter

import numpy as np
import matplotlib.pyplot as plt

import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.layers import (Embedding, Conv1D, Dropout, MaxPool1D,
                          GlobalMaxPool1D, Dense)
from keras.models import Sequential, load_model
from music21 import converter, instrument, note, chord, stream
from sklearn.model_selection import train_test_split
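# This script trains a stack of dilated causal 1D convolutions to predict the
# next piano note from the previous 32 notes, then repeatedly samples from the
# trained model to generate a short composition and writes it out as MIDI.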
# function to read a MIDI file and extract its piano notes and chords
def read_midi(file):
    print("Loading Music File:", file)
    notes = []
    notes_to_parse = None
    # parse the MIDI file
    midi = converter.parse(file)
    # group the stream by instrument
    s2 = instrument.partitionByInstrument(midi)
    # loop over all the instrument parts
    for part in s2.parts:
        print(str(part))
        # select only the piano parts
        if 'Piano' in str(part):
            notes_to_parse = part.recurse()
            # decide whether each element is a note or a chord
            for element in notes_to_parse:
                # single note: store its pitch
                if isinstance(element, note.Note):
                    notes.append(str(element.pitch))
                # chord: store its normal-order pitch classes joined by dots
                elif isinstance(element, chord.Chord):
                    notes.append('.'.join(str(n) for n in element.normalOrder))
    return np.array(notes)
# specify the path
path = 'train_subset/'
# read all the filenames
files = [i for i in os.listdir(path) if i.endswith(".mid")]
# reading each midi file; pieces differ in length, so store them as an object array
notes_array = np.array([read_midi(path + i) for i in files], dtype=object)
# flattening the per-file note arrays into a single list
notes_ = [element for note_ in notes_array for element in note_]
# number of unique notes
unique_notes = list(set(notes_))
print(len(unique_notes))
# computing the frequency of each note
freq = dict(Counter(notes_))
# consider only the frequencies
no = [count for _, count in freq.items()]
# plot a histogram of the note frequencies
plt.figure(figsize=(5, 5))
plt.hist(no)
plt.show()
# keep only the notes that occur at least 50 times
frequent_notes = [note_ for note_, count in freq.items() if count >= 50]
print(len(frequent_notes))
# keep only the frequent notes in each piece
new_music = []
for notes in notes_array:
    temp = [note_ for note_ in notes if note_ in frequent_notes]
    new_music.append(temp)
# pieces still differ in length, so use an object array
new_music = np.array(new_music, dtype=object)
no_of_timesteps = 32
x = []
y = []
for note_ in new_music:
    for i in range(0, len(note_) - no_of_timesteps, 1):
        # preparing input and output sequences
        input_ = note_[i:i + no_of_timesteps]
        output = note_[i + no_of_timesteps]
        x.append(input_)
        y.append(output)
x = np.array(x)
y = np.array(y)
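# quick sanity check: x should be (num_windows, 32) and y (num_windows,)
print(x.shape, y.shape)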
# the unique notes that remain after dropping the infrequent ones
unique_x = list(set(x.ravel()))
x_note_to_int = dict((note_, number) for number, note_ in enumerate(unique_x))
# preparing input sequences
x_seq = []
for i in x:
    temp = []
    for j in i:
        # assigning a unique integer to every note
        temp.append(x_note_to_int[j])
    x_seq.append(temp)
x_seq = np.array(x_seq)
unique_y = list(set(y))
y_note_to_int = dict((note_, number) for number, note_ in enumerate(unique_y))
y_seq = np.array([y_note_to_int[i] for i in y])
x_tr, x_val, y_tr, y_val = train_test_split(x_seq, y_seq, test_size=0.2, random_state=0)
K.clear_session()
model = Sequential()
# embedding layer maps each note index to a 100-dimensional vector
model.add(Embedding(len(unique_x), 100, input_length=no_of_timesteps, trainable=True))
# stack of causal convolutions with increasing dilation, so the receptive
# field grows with depth
model.add(Conv1D(64, 3, padding='causal', activation='relu'))
model.add(Dropout(0.2))
model.add(MaxPool1D(2))
model.add(Conv1D(128, 3, activation='relu', dilation_rate=2, padding='causal'))
model.add(Dropout(0.2))
model.add(MaxPool1D(2))
model.add(Conv1D(256, 3, activation='relu', dilation_rate=4, padding='causal'))
model.add(Dropout(0.2))
model.add(MaxPool1D(2))
model.add(GlobalMaxPool1D())
model.add(Dense(256, activation='relu'))
# softmax over the vocabulary of output notes
model.add(Dense(len(unique_y), activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
model.summary()
# save the model with the lowest validation loss seen during training
mc = ModelCheckpoint('best_model.h5', monitor='val_loss', mode='min',
                     save_best_only=True, verbose=1)
history = model.fit(x_tr, y_tr, batch_size=128, epochs=50,
                    validation_data=(x_val, y_val), verbose=1, callbacks=[mc])
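# Optional sketch: plot the training and validation loss curves from the
# History object returned by fit (assumes the run above completed).
plt.figure(figsize=(5, 5))
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()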
# reload the best checkpoint saved during training
model = load_model('best_model.h5')
# pick a random seed sequence from the validation set
ind = np.random.randint(0, len(x_val))
random_music = x_val[ind]
length_of_the_composition = 10
# integer-to-note table for decoding the model's output indices
y_int_to_note = dict((number, note_) for number, note_ in enumerate(unique_y))
predictions = []
for i in range(length_of_the_composition):
    window = random_music.reshape(1, no_of_timesteps)
    # predict a probability distribution over the next note
    prob = model.predict(window)[0]
    y_pred = np.argmax(prob)
    predictions.append(y_pred)
    # slide the window: re-encode the predicted note into the input
    # vocabulary, append it, and drop the oldest note (this assumes the
    # predicted note also occurs in the input vocabulary)
    next_int = x_note_to_int[y_int_to_note[y_pred]]
    random_music = np.append(random_music.reshape(-1)[1:], next_int)
print(predictions)
# map the predicted integers back to note names via the output vocabulary
predicted_notes = [y_int_to_note[i] for i in predictions]
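# Optional sketch (not used above): sampling from the softmax with a
# temperature instead of taking the argmax usually yields more varied music.
# The temperature parameter is a hypothetical knob, not part of the original script.
def sample_with_temperature(prob, temperature=1.0):
    # rescale the log-probabilities, renormalize, then draw one index
    logits = np.log(prob + 1e-9) / temperature
    p = np.exp(logits) / np.sum(np.exp(logits))
    return np.random.choice(len(p), p=p)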
def convert_to_midi(prediction_output):
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                cn = int(current_note)
                new_note = note.Note(cn)
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # increase the offset each iteration so that notes do not stack
        offset += 1
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='music.mid')

convert_to_midi(predicted_notes)
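# Optional: reload and inspect the generated file with music21 (assumes the
# call above wrote music.mid to the working directory)
generated = converter.parse('music.mid')
generated.show('text')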