denisbay committed on
Commit
a23fd67
·
verified ·
1 Parent(s): 0e50b4b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -6,7 +6,7 @@ import gradio as gr
6
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
7
  from diffusers import StableDiffusionPipeline
8
  from PIL import Image
9
- from glados_voice import GLaDOSVoice #In testing phase!
10
  import tempfile
11
 
12
 
@@ -72,17 +72,17 @@ model = AutoModelForCausalLM.from_pretrained(
72
  torch_dtype=torch.float16,
73
  device_map="auto"
74
  )
75
- glados_voice = GLaDOSVoice #In testing phase!
76
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
77
 
78
- def run_chat_with_voice(message, history, files): #In testing phase!
79
- history = history or []
80
- reply = glados_chat(message, files)
81
 
82
- audio_path = glados_voice.generate_speech(reply)
83
 
84
- history.append((message, reply))
85
- return history, "", audio_path
86
 
87
  def extract_text_from_pdfs(files):
88
  full_text = ""
@@ -189,7 +189,7 @@ with gr.Blocks(css=REMOTE_CSS) as app:
189
  file_input = gr.File(label="Upload PDF(s)", file_types=[".pdf"], file_count="multiple")
190
  t2i_prompt = gr.Textbox(placeholder="Or describe an image...", show_label=False)
191
  generate_btn = gr.Button("🎨 Generate Image")
192
- audio_output = gr.Audio(label="GLaDOS Voice") #In Testing Phase!
193
 
194
  def run_chat(message, history, files):
195
  history = history or []
@@ -205,8 +205,8 @@ with gr.Blocks(css=REMOTE_CSS) as app:
205
  history.append((prompt, (path,)))
206
  return history, ""
207
 
208
- #txt.submit(run_chat, [txt, chatbot, file_input], [chatbot, txt])
209
- txt.submit(run_chat_with_voice, [txt, chatbot, file_input], [chatbot, txt, audio_output]) #In Testing Phase!
210
  generate_btn.click(run_generation, [t2i_prompt, chatbot], [chatbot, t2i_prompt])
211
 
212
  app.launch()
 
6
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
7
  from diffusers import StableDiffusionPipeline
8
  from PIL import Image
9
+ #from glados_voice import GLaDOSVoice #In testing phase!
10
  import tempfile
11
 
12
 
 
72
  torch_dtype=torch.float16,
73
  device_map="auto"
74
  )
75
+ #glados_voice = GLaDOSVoice #In testing phase!
76
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
77
 
78
+ #def run_chat_with_voice(message, history, files): #In testing phase!
79
+ #history = history or []
80
+ #reply = glados_chat(message, files)
81
 
82
+ #audio_path = glados_voice.generate_speech(reply)
83
 
84
+ #history.append((message, reply))
85
+ #return history, "", audio_path
86
 
87
  def extract_text_from_pdfs(files):
88
  full_text = ""
 
189
  file_input = gr.File(label="Upload PDF(s)", file_types=[".pdf"], file_count="multiple")
190
  t2i_prompt = gr.Textbox(placeholder="Or describe an image...", show_label=False)
191
  generate_btn = gr.Button("🎨 Generate Image")
192
+ #audio_output = gr.Audio(label="GLaDOS Voice") #In Testing Phase!
193
 
194
  def run_chat(message, history, files):
195
  history = history or []
 
205
  history.append((prompt, (path,)))
206
  return history, ""
207
 
208
+ txt.submit(run_chat, [txt, chatbot, file_input], [chatbot, txt])
209
+ #txt.submit(run_chat_with_voice, [txt, chatbot, file_input], [chatbot, txt, audio_output]) #In Testing Phase!
210
  generate_btn.click(run_generation, [t2i_prompt, chatbot], [chatbot, t2i_prompt])
211
 
212
  app.launch()