Niki Zhang committed: Update app.py
app.py CHANGED
@@ -91,7 +91,7 @@ def validate_api_key(api_key):
         return False
 
 
-def init_openai_api_key(api_key=""):
+def init_openai_api_key(gpt_state,api_key=""):
     text_refiner = None
     visual_chatgpt = None
     if api_key and len(api_key) > 30:
@@ -112,12 +112,13 @@ def init_openai_api_key(api_key=""):
         print(text_refiner)
     openai_available = text_refiner is not None
     if openai_available:
-
+        gpt_state.set(1)
+        return [gr.update(visible=True)]+[gr.update(visible=False)]+[gr.update(visible=True)]*3+[gr.update(visible=False)]+ [gr.update(visible=True)]+ [gr.update(visible=False)]*2 + [text_refiner, visual_chatgpt, None]
     else:
         return [gr.update(visible=False)]*7 + [gr.update(visible=True)]*2 + [text_refiner, visual_chatgpt, 'Your OpenAI API Key is not available']
 
 def init_wo_openai_api_key():
-    return [gr.update(visible=False)]*4 + [gr.update(visible=True)]
+    return [gr.update(visible=False)]*4 + [gr.update(visible=True)]+ [gr.update(visible=False)]+[gr.update(visible=True)]+[gr.update(visible=False)]*2 + [None, None, None]
 
 def get_click_prompt(chat_input, click_state, click_mode):
     inputs = json.loads(chat_input)
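Note on `gpt_state.set(1)` above: a Gradio callback receives the *value* held by a `gr.State` (here an `int`), not the component itself, so calling `.set()` on it raises `AttributeError` at runtime. The conventional pattern is to list the state among the outputs and return its new value. A minimal standalone sketch of that pattern (component names are illustrative, not from app.py):

import gradio as gr

with gr.Blocks() as demo:
    gpt_state = gr.State(0)  # per-session value, initially 0
    key_box = gr.Textbox(label="OpenAI API Key")
    status = gr.Markdown()

    def enable_gpt(state_value, api_key):
        # state_value is the plain int stored in gpt_state, not the component.
        if api_key and len(api_key) > 30:
            return 1, "GPT enabled"  # first slot becomes the new gpt_state value
        return state_value, "Key not available"

    key_box.submit(enable_gpt, inputs=[gpt_state, key_box], outputs=[gpt_state, status])

demo.launch()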
@@ -304,21 +305,20 @@ def submit_caption(image_input, state, generated_caption, text_refiner, visual_c
                                           input_points=input_points, input_labels=input_labels)
         try:
             waveform_visual, audio_output = tts.predict(new_cap, input_language, input_audio, input_mic, use_mic, agree)
-
-            yield state, state, refined_image_input, click_index_state, input_mask_state, input_points_state, input_labels_state, out_state, waveform_visual, audio_output
+            return state, state, refined_image_input, click_index_state, input_mask_state, input_points_state, input_labels_state, out_state, waveform_visual, audio_output
         except Exception as e:
             state = state + [(None, f"Error during TTS prediction: {str(e)}")]
             print(f"Error during TTS prediction: {str(e)}")
-
+            return state, state, refined_image_input, click_index_state, input_mask_state, input_points_state, input_labels_state, out_state, None, None
 
     else:
         try:
             waveform_visual, audio_output = tts.predict(generated_caption, input_language, input_audio, input_mic, use_mic, agree)
-
+            return state, state, image_input, click_index_state, input_mask_state, input_points_state, input_labels_state, out_state, waveform_visual, audio_output
         except Exception as e:
             state = state + [(None, f"Error during TTS prediction: {str(e)}")]
             print(f"Error during TTS prediction: {str(e)}")
-
+            return state, state, image_input, click_index_state, input_mask_state, input_points_state, input_labels_state, out_state, None, None
 
 
 
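This hunk switches `submit_caption` from `yield` to `return`. Assuming no `yield` survives elsewhere in the function, it becomes a plain function whose tuple Gradio maps directly onto the outputs; if any `yield` did remain, the body would still be a generator and a `return` there would end iteration without delivering a final update. Since all four branches now hand back the same 10-slot tuple, a small helper could keep them from drifting apart; a sketch assuming the variable names above:

def _tts_outputs(state, image, click_index_state, input_mask_state,
                 input_points_state, input_labels_state, out_state,
                 waveform_visual=None, audio_output=None):
    # Single place that fixes the 10-slot ordering expected by the UI bindings.
    return (state, state, image, click_index_state, input_mask_state,
            input_points_state, input_labels_state, out_state,
            waveform_visual, audio_output)

# success branch:
#   return _tts_outputs(state, refined_image_input, click_index_state, input_mask_state,
#                       input_points_state, input_labels_state, out_state,
#                       waveform_visual=waveform_visual, audio_output=audio_output)
# failure branch (audio slots default to None):
#   return _tts_outputs(state, refined_image_input, click_index_state, input_mask_state,
#                       input_points_state, input_labels_state, out_state)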
@@ -421,7 +421,7 @@ def clear_chat_memory(visual_chatgpt, keep_global=False):
             visual_chatgpt.current_image = None
             visual_chatgpt.global_prompt = ""
 
-def cap_everything(image_input, visual_chatgpt, text_refiner):
+def cap_everything(image_input, visual_chatgpt, text_refiner,input_language, input_audio, input_mic, use_mic, agree):
 
     model = build_caption_anything_with_models(
         args,
@@ -438,7 +438,8 @@ def cap_everything(image_input, visual_chatgpt, text_refiner):
     AI_prompt = "Received."
     visual_chatgpt.global_prompt = Human_prompt + 'AI: ' + AI_prompt
     visual_chatgpt.agent.memory.buffer = visual_chatgpt.agent.memory.buffer + visual_chatgpt.global_prompt
-
+    waveform_visual, audio_output=tts.predict(paragraph, input_language, input_audio, input_mic, use_mic, agree)
+    return paragraph,waveform_visual, audio_output
 
 
 def get_style():
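Unlike `submit_caption`, the new TTS call in `cap_everything` is unguarded, so a TTS failure surfaces as a raw Gradio error instead of a chat message. A guarded variant in the same style as `submit_caption` (a sketch; the elided body is the captioning and chat-memory code from the diff):

def cap_everything(image_input, visual_chatgpt, text_refiner, input_language,
                   input_audio, input_mic, use_mic, agree):
    ...  # build model, caption the image, update visual_chatgpt memory as above
    try:
        waveform_visual, audio_output = tts.predict(
            paragraph, input_language, input_audio, input_mic, use_mic, agree)
    except Exception as e:
        # Degrade gracefully: still return the paragraph, just skip the audio.
        print(f"Error during TTS prediction: {str(e)}")
        waveform_visual, audio_output = None, None
    return paragraph, waveform_visual, audio_output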
@@ -494,6 +495,7 @@ def create_ui():
         input_mask_state = gr.State(np.zeros((1, 1)))
         input_points_state = gr.State([])
         input_labels_state = gr.State([])
+        gpt_state = gr.State(0)
 
         gr.Markdown(title)
         gr.Markdown(description)
@@ -501,7 +503,13 @@ def create_ui():
         with gr.Row():
             with gr.Column(scale=1.0):
                 with gr.Column(visible=False) as modules_not_need_gpt:
-                    with gr.Tab("
+                    with gr.Tab("Base(GPT Power)",visible=False) as base_tab:
+                        image_input_base = gr.Image(type="pil", interactive=True, elem_id="image_upload")
+                        example_image = gr.Image(type="pil", interactive=False, visible=False)
+
+
+                    with gr.Tab("Click") as click_tab:
+                        modules_not_need_gpt2=True
                     image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload")
                     example_image = gr.Image(type="pil", interactive=False, visible=False)
                     with gr.Row(scale=1.0):
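Two details in this hunk are easy to miss: `modules_not_need_gpt2=True` binds a plain boolean that the later `with gr.Column(visible=False) as modules_not_need_gpt2:` (new line 596) shadows, so it has no lasting effect; and both tabs assign to `example_image` (and reuse `elem_id="image_upload"`), so only the Click tab's component stays reachable from Python. A sketch that keeps the handles distinct (the `_base` names are illustrative):

with gr.Tab("Base(GPT Power)", visible=False) as base_tab:
    image_input_base = gr.Image(type="pil", interactive=True, elem_id="image_upload_base")
    example_image_base = gr.Image(type="pil", interactive=False, visible=False)

with gr.Tab("Click") as click_tab:
    image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload")
    example_image = gr.Image(type="pil", interactive=False, visible=False)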
@@ -578,10 +586,13 @@ def create_ui():
                                                     variant='primary')
                 with gr.Column(visible=False) as module_notification_box:
                     notification_box = gr.Textbox(lines=1, label="Notification", max_lines=5, show_label=False)
-
-
-
-
+
+                with gr.Column():
+                    with gr.Column(visible=False) as modules_need_gpt2:
+                        paragraph_output = gr.Textbox(lines=7, label="Describe Everything", max_lines=7)
+                    with gr.Column(visible=False) as modules_need_gpt0:
+                        cap_everything_button = gr.Button(value="Caption Everything in a Paragraph", interactive=True)
+
                 with gr.Column(visible=False) as modules_not_need_gpt2:
                     chatbot = gr.Chatbot(label="Chatbox", ).style(height=550, scale=0.5)
                 with gr.Column(visible=False) as modules_need_gpt3:
@@ -590,7 +601,8 @@ def create_ui():
                 with gr.Row():
                     clear_button_text = gr.Button(value="Clear Text", interactive=True)
                     submit_button_text = gr.Button(value="Submit", interactive=True, variant="primary")
-
+
+            with gr.Column(scale=0.5):
                 # TTS interface hidden initially
                 with gr.Column(visible=False) as tts_interface:
                     input_text = gr.Textbox(label="Text Prompt", value="Hello, World !, here is an example of light voice cloning. Try to upload your best audio samples quality")
@@ -600,7 +612,7 @@ def create_ui():
                     use_mic = gr.Checkbox(label="Check to use Microphone as Reference", value=False)
                     agree = gr.Checkbox(label="Agree", value=True)
                     output_waveform = gr.Video(label="Waveform Visual")
-                    output_audio = gr.Audio(label="Synthesised Audio")
+                    output_audio = gr.Audio(label="Synthesised Audio",autoplay=True)
 
                     with gr.Row():
                         submit_tts = gr.Button(value="Submit", interactive=True)
@@ -625,10 +637,10 @@ def create_ui():
         )
 
 
-        openai_api_key.submit(init_openai_api_key, inputs=[openai_api_key],
+        openai_api_key.submit(init_openai_api_key, inputs=[gpt_state,openai_api_key],
                               outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3, modules_not_need_gpt,
                                        modules_not_need_gpt2, tts_interface,module_key_input ,module_notification_box, text_refiner, visual_chatgpt, notification_box])
-        enable_chatGPT_button.click(init_openai_api_key, inputs=[openai_api_key],
+        enable_chatGPT_button.click(init_openai_api_key, inputs=[gpt_state,openai_api_key],
                                     outputs=[modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3,
                                              modules_not_need_gpt,
                                              modules_not_need_gpt2, tts_interface,module_key_input,module_notification_box, text_refiner, visual_chatgpt, notification_box])
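Both bindings route twelve outputs (nine visibility toggles plus `text_refiner`, `visual_chatgpt`, `notification_box`), and each branch of `init_openai_api_key` above returns exactly twelve values, so the wiring lines up. With lists this long, sharing one list and asserting its length catches drift early; a sketch using the names from the diff:

outputs = [modules_need_gpt0, modules_need_gpt1, modules_need_gpt2, modules_need_gpt3,
           modules_not_need_gpt, modules_not_need_gpt2, tts_interface, module_key_input,
           module_notification_box, text_refiner, visual_chatgpt, notification_box]
# init_openai_api_key returns 9 gr.update(...) slots + [text_refiner, visual_chatgpt, message]
assert len(outputs) == 12

openai_api_key.submit(init_openai_api_key, inputs=[gpt_state, openai_api_key], outputs=outputs)
enable_chatGPT_button.click(init_openai_api_key, inputs=[gpt_state, openai_api_key], outputs=outputs)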
@@ -652,7 +664,8 @@ def create_ui():
             show_progress=False
         )
 
-        cap_everything_button.click(cap_everything, [origin_image, visual_chatgpt, text_refiner],
+        cap_everything_button.click(cap_everything, [origin_image, visual_chatgpt, text_refiner,input_language, input_audio, input_mic, use_mic, agree],
+                                    [paragraph_output,output_waveform, output_audio])
 
         clear_button_click.click(
             lambda x: ([[], [], []], x),
@@ -688,6 +701,10 @@ def create_ui():
         )
 
         image_input.clear(clear_chat_memory, inputs=[visual_chatgpt])
+
+        image_input_base.upload(upload_callback, [image_input_base, state, visual_chatgpt],
+                                [chatbot, state, origin_image, click_state, image_input_base, sketcher_input,
+                                 image_embedding, original_size, input_size])
 
 
         image_input.upload(upload_callback, [image_input, state, visual_chatgpt],
@@ -705,8 +722,36 @@ def create_ui():
         example_image.change(upload_callback, [example_image, state, visual_chatgpt],
                              [chatbot, state, origin_image, click_state, image_input, sketcher_input,
                               image_embedding, original_size, input_size])
+        example_image.change(upload_callback, [example_image, state, visual_chatgpt],
+                             [chatbot, state, origin_image, click_state, image_input_base, sketcher_input,
+                              image_embedding, original_size, input_size])
         example_image.change(clear_chat_memory, inputs=[visual_chatgpt])
 
+        def on_click_tab_selected():
+            if gpt_state ==1:
+                print(gpt_state)
+                print("using gpt")
+                return [gr.update(visible=True)]*2+[gr.update(visible=False)]*2
+            else:
+                print("no gpt")
+                return [gr.update(visible=False)]+[gr.update(visible=True)]+[gr.update(visible=False)]*2
+
+        def on_base_selected():
+            if gpt_state ==1:
+                print(gpt_state)
+                print("using gpt")
+                return [gr.update(visible=True)]*2+[gr.update(visible=False)]*2
+            else:
+                print("no gpt")
+                return [gr.update(visible=False)]*4
+
+
+        click_tab.select(on_click_tab_selected, outputs=[modules_need_gpt1,modules_not_need_gpt2,modules_need_gpt0,modules_need_gpt2])
+        base_tab.select(on_base_selected, outputs=[modules_need_gpt0,modules_need_gpt2,modules_not_need_gpt2,modules_need_gpt1])
+
+
+
+
         image_input.select(
             inference_click,
             inputs=[
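A caveat on the new tab handlers: inside `on_click_tab_selected` and `on_base_selected`, `gpt_state` is the `gr.State` component captured from the enclosing `create_ui` scope, so `gpt_state ==1` compares a component object with an integer and is always false; the "using gpt" branch can never run, and the earlier `gpt_state.set(1)` would not change that. Reading the per-session value requires declaring the state as an event input; a sketch for the click tab (the base tab is analogous):

def on_click_tab_selected(gpt_state_value):
    # gpt_state_value is the session's int, supplied by Gradio from gpt_state.
    if gpt_state_value == 1:
        return [gr.update(visible=True)]*2 + [gr.update(visible=False)]*2
    return [gr.update(visible=False)] + [gr.update(visible=True)] + [gr.update(visible=False)]*2

click_tab.select(on_click_tab_selected, inputs=[gpt_state],
                 outputs=[modules_need_gpt1, modules_not_need_gpt2, modules_need_gpt0, modules_need_gpt2])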
@@ -746,6 +791,11 @@ def create_ui():
             show_progress=False, queue=True
         )
 
+        def update_output_audio():
+            return gr.update(autoplay=True)
+
+        output_audio.change(update_output_audio,outputs=[output_audio])
+
 
 
 
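Since `output_audio` is already constructed with `autoplay=True` (new line 615), this `.change` handler re-applies a setting that is already in place. If the intent is only that synthesized audio autoplays, the constructor argument alone should suffice, though the handler may be a workaround for Gradio versions that drop autoplay when the component's value updates:

# autoplay set once at construction; no extra .change round-trip needed
output_audio = gr.Audio(label="Synthesised Audio", autoplay=True)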