import sys
sys.path.append('../..')

# Ignore a bunch of deprecation warnings
import warnings
warnings.filterwarnings("ignore")
import copy
import os
import time
from tqdm import tqdm
import math
import ddsp
import ddsp.training
from data_handling.ddspdataset import DDSPDataset
from utils.training_utils import print_hparams, set_seed, save_results, str2bool
from hparams_midiae_interp_cond import hparams as hp
from midiae_interp_cond.get_model import get_model, get_fake_data
import librosa
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import pandas as pd
import qgrid
from notebook_utils import *
set_seed(1234)
# Helper Functions
sample_rate = 16000
print('Done!')
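# Load the hyperparameters recorded in the checkpoint's train.log and apply
# them on top of the defaults.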
model_path = r'/data/ddsp-experiment/logs/5.13_samples/150000'
hp_dict = get_hp(os.path.join(os.path.dirname(model_path), 'train.log'))
for k, v in hp_dict.items():
setattr(hp, k, v)
hp.sequence_length = 1000
# from data_handling.urmp_tfrecord_dataloader import UrmpMidi
# from data_handling.get_tfrecord_length import get_tfrecord_length
# data_dir = r'/data/music_dataset/urmp_dataset/tfrecord_ddsp/batched/solo_instrument'
# test_data_loader = UrmpMidi(data_dir, instrument_key='vn', split='test')
# evaluation_data = test_data_loader.get_batch(batch_size=1, shuffle=True, repeats=1)
from data_handling.google_solo_inst_dataloader import GoogleSoloInstrument
test_data_loader = GoogleSoloInstrument(base_dir=r'/data/music_dataset/solo_performance_google/solo-inst_midi_features', instrument_key='sax', split='test')
evaluation_data = test_data_loader.get_batch(batch_size=1, shuffle=True, repeats=1)
evaluation_data = iter(evaluation_data)
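# Build the model on fake data so its variables exist, then load the
# checkpoint weights.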
model = get_model(hp)
_ = model._build(get_fake_data(hp))
model.load_weights(model_path)
sample = next(evaluation_data)
from midiae_interp_cond.interpretable_conditioning import midi_to_hz, get_interpretable_conditioning, extract_harm_controls
plot_spec(sample['audio'][0].numpy(), sr=sample_rate)
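# Reconstruct synthesis parameters from the audio with the synth coder,
# extract the note-level conditioning, and resynthesize audio from the MIDI
# features and conditioning.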
synth_params, control_params, synth_audio = model.run_synth_coder(sample, training=False)
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
midi_audio, params = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
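# Convert f0 from Hz to MIDI note numbers so the plotted pitch is on a
# semitone scale.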
f0, amps, hd, noise = synth_params_normalized
f0_midi = ddsp.core.hz_to_midi(f0)
synth_params_normalized = (f0_midi, amps, hd, noise)
plot_pred_acoustic_feature(sample['audio'].numpy()[0], synth_audio.numpy()[0], get_synth_params(synth_params_normalized), mask_zero_f0=True)
The function of each conditioning:

Loudness:
- `loudness_mean`: overall volume of a note.
- `loudness_std`: the extent of the volume change (crescendo & decrescendo).
- `amplitudes_max_pos`: relative position (0-1) inside a note where the amplitudes reach their maximum (=0 decrescendo, =1 crescendo).

Attack:
- `attack_level`: the level of the note attack (the average amount of noise in the first 10 frames of each note).

Timbre:
- `brightness`: controls the average timbre of a note (the centroid of the harmonic distribution).

Pitch:
- `pitch_variation_std`: controls the extent of the vibrato, taken from the amplitude of the rfft (it should really be called "vibrato extent", but the name is kept for compatibility).
- `vibrato_rate`: the rate of the vibrato (also taken from the rfft).

The conditionings are note-pooled, and the conditioning values of rests are masked to 0; see the sketch after this list.
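To make the note pooling and rest masking concrete, here is a minimal NumPy sketch. It is not the model's actual implementation (that lives in midiae_interp_cond.interpretable_conditioning); the function names and the note_ids segmentation format are assumptions for illustration.
def pool_conditioning_per_note(frame_feature, note_ids):
    """Average a frame-wise feature over each note and broadcast the note-level
    value back to every frame of that note. Frames with note_ids == 0 are
    treated as rests and their conditioning stays 0."""
    pooled = np.zeros_like(frame_feature)
    for note in np.unique(note_ids):
        if note == 0:  # rest: conditioning masked to 0
            continue
        mask = note_ids == note
        pooled[mask] = frame_feature[mask].mean()
    return pooled
# Two notes separated by a rest:
loudness = np.array([0.2, 0.4, 0.0, 0.0, 0.8, 1.0])
note_ids = np.array([1, 1, 0, 0, 2, 2])
print(pool_conditioning_per_note(loudness, note_ids))  # [0.3 0.3 0.  0.  0.9 0.9]
In the same spirit, the two vibrato features described above can be estimated from a single note's f0 contour with a real FFT. Again a sketch under assumptions (f0 in MIDI note numbers, a 250 frames-per-second frame rate), not the repo's exact code:
def estimate_vibrato(f0_midi, frame_rate=250):
    """Estimate vibrato rate (Hz) and extent from one note's f0 contour."""
    deviation = f0_midi - f0_midi.mean()   # pitch deviation in semitones
    spectrum = np.abs(np.fft.rfft(deviation))
    freqs = np.fft.rfftfreq(len(deviation), d=1.0 / frame_rate)
    peak = spectrum[1:].argmax() + 1       # ignore the DC bin
    return freqs[peak], deviation.std()    # (vibrato_rate, pitch_variation_std)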
# sample = next(evaluation_data)
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
conditioning_df = conditioining_dict_to_df(conditioning_dict, sample['onsets'], sample['offsets'], sample['midi'])
qgrid_widget = qgrid.show_grid(conditioning_df, show_toolbar=True)
qgrid_widget
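# Edit conditioning values by hand in the qgrid widget above, then pull the
# edited DataFrame back and resynthesize with the modified conditioning.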
conditioning_df_changed = qgrid_widget.get_changed_df()
conditioning_dict = conditioning_df_to_dict(conditioning_df_changed, length=1000)
midi_audio_changed, params_changed = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
conditioning_df_changed
plot_pred_acoustic_feature(sample['audio'].numpy()[0], midi_audio_changed.numpy()[0], get_synth_params(params_changed), mask_zero_f0=True)
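# Experiment: constant vibrato. Set every note's vibrato_rate to 5.25 and
# pitch_variation_std to 1.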
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
conditioning_df = conditioining_dict_to_df(conditioning_dict, sample['onsets'], sample['offsets'], sample['midi'])
conditioning_df['vibrato_rate'] = np.ones_like(conditioning_df['vibrato_rate'].values)*5.25
conditioning_df['pitch_variation_std'] = np.ones_like(conditioning_df['pitch_variation_std'].values)
conditioning_df
conditioning_dict = conditioning_df_to_dict(conditioning_df, length=1000)
midi_audio_changed, params_changed = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
plot_pred_acoustic_feature(sample['audio'].numpy()[0], midi_audio_changed.numpy()[0], get_synth_params(params_changed), mask_zero_f0=True)
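# Experiment: no vibrato. Zero out vibrato_rate and pitch_variation_std for
# every note.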
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
conditioning_df = conditioining_dict_to_df(conditioning_dict, sample['onsets'], sample['offsets'], sample['midi'])
conditioning_df['vibrato_rate'] = np.zeros_like(conditioning_df['vibrato_rate'].values)
conditioning_df['pitch_variation_std'] = np.zeros_like(conditioning_df['pitch_variation_std'].values)
conditioning_df
conditioning_dict = conditioning_df_to_dict(conditioning_df, length=1000)
midi_audio_changed, params_changed = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
plot_pred_acoustic_feature(sample['audio'].numpy()[0], midi_audio_changed.numpy()[0], get_synth_params(params_changed), mask_zero_f0=True)
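# Experiment: crescendo on every note. amplitudes_max_pos = 1 puts the
# amplitude peak at the end of each note, with a moderate loudness_std.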
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
conditioning_df = conditioining_dict_to_df(conditioning_dict, sample['onsets'], sample['offsets'], sample['midi'])
conditioning_df['amplitudes_max_pos'] = np.ones_like(conditioning_df['amplitudes_max_pos'].values)
conditioning_df['loudness_std'] = np.ones_like(conditioning_df['loudness_std'].values) * 0.15
conditioning_df
conditioning_dict = conditioning_df_to_dict(conditioning_df, length=1000)
midi_audio_changed, params_changed = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
plot_pred_acoustic_feature(sample['audio'].numpy()[0], midi_audio_changed.numpy()[0], get_synth_params(params_changed), mask_zero_f0=True)
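# Experiment: decrescendo on every note. amplitudes_max_pos = 0 puts the
# amplitude peak at the note onset.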
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
conditioning_df = conditioining_dict_to_df(conditioning_dict, sample['onsets'], sample['offsets'], sample['midi'])
conditioning_df['amplitudes_max_pos'] = np.zeros_like(conditioning_df['amplitudes_max_pos'].values)
conditioning_df['loudness_std'] = np.ones_like(conditioning_df['loudness_std'].values) * 0.2
# conditioning_df['brightness'] = conditioning_df['brightness'].values * 0.8
conditioning_df
conditioning_dict = conditioning_df_to_dict(conditioning_df, length=1000)
midi_audio_changed, params_changed = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
plot_pred_acoustic_feature(sample['audio'].numpy()[0], midi_audio_changed.numpy()[0], get_synth_params(params_changed), mask_zero_f0=True)
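# Experiment: decrescendo with a darker timbre. Additionally scale each
# note's brightness to 45% of its original value.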
synth_params_normalized, midi_features, conditioning_dict = model.gen_cond_dict_from_feature(sample, training=False)
conditioning_df = conditioining_dict_to_df(conditioning_dict, sample['onsets'], sample['offsets'], sample['midi'])
conditioning_df['amplitudes_max_pos'] = np.zeros_like(conditioning_df['amplitudes_max_pos'].values)
conditioning_df['loudness_std'] = np.ones_like(conditioning_df['loudness_std'].values) * 0.2
conditioning_df['brightness'] = conditioning_df['brightness'].values * 0.45
conditioning_df
conditioning_dict = conditioning_df_to_dict(conditioning_df, length=1000)
midi_audio_changed, params_changed = model.gen_audio_from_cond_dict(conditioning_dict, midi_features, instrument_id=sample['instrument_id'])
plot_pred_acoustic_feature(sample['audio'].numpy()[0], midi_audio_changed.numpy()[0], get_synth_params(params_changed), mask_zero_f0=True)