Spaces:
Runtime error
Runtime error
Commit ·
27ecaed
1
Parent(s): 3973143
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,10 +7,34 @@ import gradio as gr
|
|
| 7 |
SIMBALS_GLOBAL_DB = 1
|
| 8 |
SIMBALS_MAIN_DATABASE = 2
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
def process(input_path):
|
| 11 |
# setup the client
|
|
|
|
| 12 |
client = SimbalsAPIClient(os.environ['TOKEN'], debug=True)
|
| 13 |
-
parameters = client.prepare_request("31415", audio_file=
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
# add audio features service with an available database
|
| 16 |
#parameters = client.add_audio_features(parameters, SIMBALS_MAIN_DATABASE)
|
|
@@ -61,7 +85,7 @@ def process(input_path):
|
|
| 61 |
audioquality = {}
|
| 62 |
audioquality[res['audio_quality'][0]['name']]= res['audio_quality'][0]['probability']
|
| 63 |
|
| 64 |
-
return dict_moods, genres, instruments, vocalgender, timbres, themes, audioquality,str(dict_desc['Electric/Acoustic']),str(dict_desc['Danceability']),str(dict_desc['Arousal']),str(dict_desc['Vocal/Instrumental']),str(dict_desc['Studio/Live']),str(dict_desc['Music/Speech']),str(dict_desc['Valence']),str(dict_desc['Melodic']),str(dict_desc['Articulation']),str(dict_desc['RhythmicStability']),str(dict_desc['Dissonance']),str(dict_desc['BPM']),str(dict_desc['Binary']),str(dict_desc['Key']),str(dict_desc['Mode']),str(dict_desc['TexturalStability'])
|
| 65 |
|
| 66 |
|
| 67 |
|
|
@@ -72,7 +96,14 @@ with gr.Blocks() as demo:
|
|
| 72 |
with gr.Column():
|
| 73 |
|
| 74 |
with gr.Row():
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
with gr.Row():
|
| 78 |
analyze_btn = gr.Button('Analyze File')
|
|
@@ -136,7 +167,8 @@ with gr.Blocks() as demo:
|
|
| 136 |
'''
|
| 137 |
|
| 138 |
analyze_btn.click(process, inputs=[audio_input],
|
| 139 |
-
outputs=[
|
|
|
|
| 140 |
genres,
|
| 141 |
instruments,
|
| 142 |
vocalgender,
|
|
|
|
| 7 |
SIMBALS_GLOBAL_DB = 1
|
| 8 |
SIMBALS_MAIN_DATABASE = 2
|
| 9 |
|
| 10 |
+
from pytube import YouTube
|
| 11 |
+
|
| 12 |
+
embed_html = '<iframe width="560" height="315" src="https://www.youtube.com/embed/EngW7tLk6R8" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
|
| 13 |
+
|
| 14 |
+
embed_html1 = '<iframe width="560" height="315" src="https://www.youtube.com/embed/'
|
| 15 |
+
embed_html2 = '" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def download_audio(id_video):
    """Download the audio track of a YouTube video and build its embed HTML.

    Parameters
    ----------
    id_video : str
        A YouTube watch URL, e.g. "https://www.youtube.com/watch?v=XXXXXXXXXXX".
        (Despite the name, this is the full URL, not just the video id.)

    Returns
    -------
    tuple
        ``(audio_file, audio_file, embed_html_all)`` — the local path of the
        downloaded audio (returned twice so it can feed two Gradio outputs)
        and the iframe HTML string embedding the video.

    Notes
    -----
    Performs network I/O through pytube. Relies on the module-level
    ``embed_html1`` / ``embed_html2`` iframe fragments being defined.
    """
    video = YouTube(id_video)
    # Take the 11-character video id that follows "?v=" in the watch URL.
    # (was `id`, which shadowed the `id` builtin)
    video_id = id_video.split("?v=")[-1][:11]
    # First audio-only stream; download() returns the local file path.
    audio_streams = video.streams.filter(only_audio=True)
    audio_file = audio_streams[0].download()
    # NOTE(review): the original also derived an `audio_title` from the file
    # name but never used it — dropped as dead code.
    embed_html_all = embed_html1 + video_id + embed_html2
    return audio_file, audio_file, embed_html_all
|
| 28 |
+
|
| 29 |
def process(input_path):
|
| 30 |
# setup the client
|
| 31 |
+
audio_file, audio_file, embed_html_all = download_audio(input_path)
|
| 32 |
client = SimbalsAPIClient(os.environ['TOKEN'], debug=True)
|
| 33 |
+
parameters = client.prepare_request("31415", audio_file=audio_file)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
#client = SimbalsAPIClient(os.environ['TOKEN'], debug=True)
|
| 37 |
+
#parameters = client.prepare_request("31415", audio_file=input_path)
|
| 38 |
|
| 39 |
# add audio features service with an available database
|
| 40 |
#parameters = client.add_audio_features(parameters, SIMBALS_MAIN_DATABASE)
|
|
|
|
| 85 |
audioquality = {}
|
| 86 |
audioquality[res['audio_quality'][0]['name']]= res['audio_quality'][0]['probability']
|
| 87 |
|
| 88 |
+
return embed_html_all, dict_moods, genres, instruments, vocalgender, timbres, themes, audioquality,str(dict_desc['Electric/Acoustic']),str(dict_desc['Danceability']),str(dict_desc['Arousal']),str(dict_desc['Vocal/Instrumental']),str(dict_desc['Studio/Live']),str(dict_desc['Music/Speech']),str(dict_desc['Valence']),str(dict_desc['Melodic']),str(dict_desc['Articulation']),str(dict_desc['RhythmicStability']),str(dict_desc['Dissonance']),str(dict_desc['BPM']),str(dict_desc['Binary']),str(dict_desc['Key']),str(dict_desc['Mode']),str(dict_desc['TexturalStability'])
|
| 89 |
|
| 90 |
|
| 91 |
|
|
|
|
| 96 |
with gr.Column():
|
| 97 |
|
| 98 |
with gr.Row():
|
| 99 |
+
#gr.HTML(embed_html)
|
| 100 |
+
html = gr.HTML()
|
| 101 |
+
|
| 102 |
+
with gr.Row():
|
| 103 |
+
audio_input = gr.Textbox(placeholder='YouTube video URL', label='YouTube video URL')
|
| 104 |
+
|
| 105 |
+
#with gr.Row():
|
| 106 |
+
# audio_input = gr.Audio(type="filepath", label='Audio Input')
|
| 107 |
|
| 108 |
with gr.Row():
|
| 109 |
analyze_btn = gr.Button('Analyze File')
|
|
|
|
| 167 |
'''
|
| 168 |
|
| 169 |
analyze_btn.click(process, inputs=[audio_input],
|
| 170 |
+
outputs=[html,
|
| 171 |
+
dict_moods,
|
| 172 |
genres,
|
| 173 |
instruments,
|
| 174 |
vocalgender,
|