Update app.py (#1), opened by arjunanand13

app.py CHANGED
@@ -1,29 +1,51 @@
 from transformers import pipeline, Conversation
 import gradio as gr
 from diffusers import DiffusionPipeline
-import gradio as gr
-from transformers import pipeline
 import scipy
-
+
+# Initializing models
 chatbot = pipeline(model="facebook/blenderbot-400M-distill")
+ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
+synthesiser = pipeline("text-to-audio", "facebook/musicgen-small")
+
 message_list = []
 response_list = []
-def vanilla_chatbot(message
+def vanilla_chatbot(message):
     conversation = Conversation(text=message, past_user_inputs=message_list, generated_responses=response_list)
     bot = chatbot(conversation.messages[0]['content']) # working code
     return bot[-1]['generated_text']
 
-
-model_id = "CompVis/ldm-text2im-large-256"
-ldm = DiffusionPipeline.from_pretrained(model_id)
 def generate_image(Prompt):
     images = ldm([Prompt], num_inference_steps=50, eta=.3, guidance_scale=6)
     return images.images[0]
 
-
-synthesiser = pipeline("text-to-audio", "facebook/musicgen-small")
 def generate_music(Prompt):
     music = synthesiser(Prompt, forward_params={"do_sample": True, "max_new_tokens": 100})
     rate = music["sampling_rate"]
     mus = music["audio"][0].reshape(-1)
     return rate, mus
+
+def process_input(Prompt, choice):
+    if choice == "Chat":
+        return vanilla_chatbot(Prompt), None, None
+    elif choice == "Music":
+        rate, audio = generate_music(Prompt)
+        return None, (rate, audio), None
+    else:
+        return None, None, generate_image(Prompt)
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        text_input = gr.Textbox()
+        choice = gr.Radio(choices=["Chat", "Music", "Image"])
+
+    with gr.Row():
+        chatbot_output = gr.Textbox()
+        music_output = gr.Audio()
+        image_output = gr.Image()
+
+    submit_btn = gr.Button("Generate")
+
+    submit_btn.click(fn=process_input, inputs=[text_input, choice], outputs=[chatbot_output, music_output, image_output])
+
+demo.launch(debug=True)
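For reference, a minimal sketch of how the new process_input dispatcher added in this change could be exercised outside the Gradio UI. It is not part of the PR: the prompts and the output filename are illustrative only, and it assumes the three pipelines above have already been downloaded and initialized.

# Sketch: quick local sanity check of the dispatch logic (not part of app.py).
reply, _, _ = process_input("Hello there!", "Chat")                  # BlenderBot reply text
_, (rate, audio), _ = process_input("upbeat lo-fi piano", "Music")   # (sample rate, 1-D waveform) as consumed by gr.Audio
_, _, image = process_input("a watercolor of a lighthouse", "Image") # PIL image returned by the LDM pipeline
print(reply, rate, audio.shape)
image.save("sample.png")                                             # example output path

Each branch returns a 3-tuple with None in the unused slots, which is what lets a single click handler feed the Textbox, Audio, and Image outputs at once.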