# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.

# %% auto 0
__all__ = ['learn', 'categories', 'aud', 'examples', 'intf', 'log_mel_spec_tfm', 'classify_aud']

# %% app.ipynb 1
from fastai.vision.all import *
import matplotlib.pyplot as plt
import librosa.display
import numpy as np
import pandas as pd
import librosa
import gradio as gr

# %% app.ipynb 2
def log_mel_spec_tfm(fname):
    # Load the audio, compute a log-magnitude STFT spectrogram (despite the
    # function's name, this is not a mel spectrogram), render it with librosa,
    # and save the plot as a PNG next to the source .wav file.
    y, sr = librosa.load(fname, mono=True)
    D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
    img = librosa.display.specshow(D, y_axis='linear', x_axis='time', sr=sr)
    plt.savefig(str(fname[:-4]) + '.png')
    plt.close()
    return img

# %% app.ipynb 3
# Load the exported fastai learner and remove the progress bar callback,
# which only prints noise when serving predictions.
learn = load_learner('model.pkl')
learn.remove_cb(ProgressCallback)

# %% app.ipynb 6
categories = ('Brass', 'Flute', 'Guitar', 'Keyboard', 'Mallet', 'Reed', 'String', 'Vocal')

def classify_aud(aud):
    # Render the uploaded audio as a spectrogram image, classify the image,
    # and return a {category: probability} mapping for the Gradio label output.
    log_mel_spec_tfm(aud)
    img_fname = str(aud[:-4]) + '.png'
    pred, idx, probs = learn.predict(img_fname)
    return dict(zip(categories, map(float, probs)))

# %% app.ipynb 8
aud = gr.Audio(source="upload", type="filepath")
examples = [f.name for f in Path('.').iterdir() if '.wav' in f.name]
intf = gr.Interface(fn=classify_aud, inputs=aud, outputs="label", examples=examples)
intf.launch(inline=False)
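
# %% usage sketch
# A minimal, commented-out way to exercise classify_aud outside the Gradio UI.
# 'example.wav' is a placeholder filename (an assumption), not a file the app ships with.
# probs = classify_aud('example.wav')          # {category: probability}
# print(max(probs, key=probs.get))             # most likely instrument family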