Update app.py
Browse files
app.py
CHANGED
|
@@ -150,6 +150,15 @@ def add_text(chatbot, history, prompt, file):
|
|
| 150 |
print("chatbot nach add_text............")
|
| 151 |
print(chatbot)
|
| 152 |
return chatbot, history, prompt, "" #gr.Image( label=None, size=(30,30), visible=False, scale=1) #gr.Textbox(value="", interactive=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
| 154 |
############################################
|
| 155 |
#nach dem Upload soll das zusätzliche Fenster mit dem image drinnen angezeigt werden
|
|
@@ -414,22 +423,21 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
|
|
| 414 |
|
| 415 |
chatbot[-1][1] = result
|
| 416 |
|
| 417 |
-
|
| 418 |
-
"""
|
| 419 |
for character in result:
|
| 420 |
-
|
| 421 |
time.sleep(0.03)
|
| 422 |
-
yield
|
| 423 |
if shared_state.interrupted:
|
| 424 |
shared_state.recover()
|
| 425 |
try:
|
| 426 |
-
yield
|
| 427 |
except:
|
| 428 |
pass
|
| 429 |
-
|
| 430 |
##################################################
|
| 431 |
#zu einem Text-Prompt ein Bild via Stable Diffusion generieren
|
| 432 |
-
def generate_bild(prompt, chatbot,
|
| 433 |
#Bild nach Anweisung zeichnen und in History darstellen...
|
| 434 |
data = {"inputs": prompt}
|
| 435 |
response = requests.post(API_URL, headers=HEADERS, json=data)
|
|
@@ -439,7 +447,7 @@ def generate_bild(prompt, chatbot, temperature=0.5, max_new_tokens=4048,top_p=0.
|
|
| 439 |
image = Image.open(io.BytesIO(result))
|
| 440 |
image_64 = umwandeln_fuer_anzeige(image)
|
| 441 |
chatbot.append(prompt, "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8')))
|
| 442 |
-
return chatbot
|
| 443 |
|
| 444 |
|
| 445 |
|
|
@@ -622,32 +630,25 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
| 622 |
################################################
|
| 623 |
# Tab zum Zeichnen mit Stable Diffusion
|
| 624 |
################################################
|
| 625 |
-
|
| 626 |
-
|
| 627 |
-
gr.HTML("LI Zeichnen mit KI")
|
| 628 |
-
gr.Markdown(description_top)
|
| 629 |
-
with gr.Row():
|
| 630 |
-
description2 = "<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> zum Zeichnen verwendet. Zur Zeit wird hier Stable Diffusion verwendet.\n\n"
|
| 631 |
-
additional_inputs = [
|
| 632 |
gr.Slider(label="Temperature", value=0.65, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten", visible=True),
|
| 633 |
gr.Slider(label="Max new tokens", value=1024, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens", visible=True),
|
| 634 |
gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit.", visible=True),
|
| 635 |
gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
|
| 636 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 637 |
chatbot_bild = gr.Chatbot(elem_id="li-zeichnen")
|
| 638 |
-
|
| 639 |
-
|
| 640 |
-
|
| 641 |
-
|
| 642 |
-
theme="soft",
|
| 643 |
-
chatbot=chatbot_bild,
|
| 644 |
-
retry_btn="🔄 Wiederholen",
|
| 645 |
-
undo_btn="↩️ Letztes löschen",
|
| 646 |
-
clear_btn="🗑️ Verlauf löschen",
|
| 647 |
-
submit_btn = "Abschicken",
|
| 648 |
-
description = description2)
|
| 649 |
-
|
| 650 |
-
gr.Markdown(description)
|
| 651 |
|
| 652 |
######################################
|
| 653 |
# Events und Übergabe Werte an Funktionen
|
|
@@ -660,7 +661,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
| 660 |
inputs=[
|
| 661 |
user_question,
|
| 662 |
upload,
|
| 663 |
-
|
| 664 |
history,
|
| 665 |
rag_option,
|
| 666 |
model_option,
|
|
@@ -683,7 +684,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
| 683 |
|
| 684 |
# Chatbot
|
| 685 |
transfer_input_args = dict(
|
| 686 |
-
fn=add_text, inputs=[chatbot, history, user_input, upload], outputs=[chatbot,
|
| 687 |
)
|
| 688 |
|
| 689 |
predict_event1 = user_input.submit(**transfer_input_args, queue=False,).then(**predict_args)
|
|
@@ -697,6 +698,26 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
| 697 |
|
| 698 |
######################################
|
| 699 |
# Für Tab 2: Zeichnen
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 700 |
|
| 701 |
|
| 702 |
demo.title = "LI-ChatBot"
|
|
|
|
| 150 |
print("chatbot nach add_text............")
|
| 151 |
print(chatbot)
|
| 152 |
return chatbot, history, prompt, "" #gr.Image( label=None, size=(30,30), visible=False, scale=1) #gr.Textbox(value="", interactive=False)
|
| 153 |
+
|
| 154 |
+
def add_text2(chatbot, prompt):
    """Append the user's drawing prompt to the Tab-2 chat history.

    Parameters:
        chatbot: list of (user, bot) message pairs (gr.Chatbot history).
        prompt:  the text the user typed.

    Returns:
        (chatbot, prompt, "") — the updated history, the captured prompt,
        and an empty string used to clear the input textbox. Three values,
        matching the three output components wired to this function
        (chatbot_bild, user_question2, user_input2).
    """
    if prompt == "":
        # No prompt given: show an error bubble instead of issuing a request.
        chatbot = chatbot + [("", "Prompt fehlt!")]
    else:
        chatbot = chatbot + [(prompt, None)]
    print("chatbot nach add_text............")
    print(chatbot)
    # Bug fix: the original returned only two values, but the event wiring
    # lists three output components — Gradio raises on the arity mismatch.
    # Sibling add_text returns "" as the last value to clear the textbox.
    return chatbot, prompt, ""
|
| 162 |
|
| 163 |
############################################
|
| 164 |
#nach dem Upload soll das zusätzliche Fenster mit dem image drinnen angezeigt werden
|
|
|
|
| 423 |
|
| 424 |
chatbot[-1][1] = result
|
| 425 |
|
| 426 |
+
|
|
|
|
| 427 |
for character in result:
|
| 428 |
+
chatbot[-1][1] += character
|
| 429 |
time.sleep(0.03)
|
| 430 |
+
yield chatbot, history "Generating"
|
| 431 |
if shared_state.interrupted:
|
| 432 |
shared_state.recover()
|
| 433 |
try:
|
| 434 |
+
yield chatbot, history "Stop: Success"
|
| 435 |
except:
|
| 436 |
pass
|
| 437 |
+
|
| 438 |
##################################################
|
| 439 |
#zu einem Text-Prompt ein Bild via Stable Diffusion generieren
|
| 440 |
+
def generate_bild(prompt, chatbot, temperature=0.5, max_new_tokens=4048,top_p=0.6, repetition_penalty=1.3):
    """Generate an image for *prompt* via the Stable Diffusion HTTP endpoint
    and show it in the chat history.

    NOTE(review): temperature/max_new_tokens/top_p/repetition_penalty are
    accepted but not part of the visible request payload (the rest of the
    function is outside this view) — confirm whether they are used.
    """
    # Draw the image as instructed and display it in the history...
    # Only the prompt is sent to the inference API.
    data = {"inputs": prompt}
    response = requests.post(API_URL, headers=HEADERS, json=data)
|
|
|
|
| 447 |
image = Image.open(io.BytesIO(result))
|
| 448 |
image_64 = umwandeln_fuer_anzeige(image)
|
| 449 |
chatbot.append(prompt, "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8')))
|
| 450 |
+
return chatbot, "Success"
|
| 451 |
|
| 452 |
|
| 453 |
|
|
|
|
| 630 |
################################################
|
| 631 |
# Tab zum Zeichnen mit Stable Diffusion
|
| 632 |
################################################
|
| 633 |
+
# Info text shown at the top of the drawing tab.
description2 = "<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> zum Zeichnen verwendet. Zur Zeit wird hier Stable Diffusion verwendet.\n\n"

# Generation-parameter sliders, later attached via the accordion below.
additional_inputs = [
    gr.Slider(label="Temperature", value=0.65, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten", visible=True),
    gr.Slider(label="Max new tokens", value=1024, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens", visible=True),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit.", visible=True),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
]

with gr.Tab("KI zum Zeichnen"):
    with gr.Row():
        gr.HTML("LI Zeichnen mit KI")
        # NOTE(review): elem_id "status_display" looks reused from the chat
        # tab — duplicate element ids on one page; confirm and rename.
        status_display2 = gr.Markdown("Success", elem_id="status_display")
    gr.Markdown(description2)
    with gr.Row():
        chatbot_bild = gr.Chatbot(elem_id="li-zeichnen")
    submitBtn2 = gr.Button("Senden")
    cancelBtn2 = gr.Button("Stop")
    # NOTE(review): this ClearButton targets the *chat* tab's components
    # (user_input, chatbot, history, ...) — presumably chatbot_bild /
    # user_input2 were intended as well; confirm.
    emptyBtn2 = gr.ClearButton([user_input, chatbot, history, file_display, image_display], value="🧹 Neue Session", scale=10)
    additional_inputs_accordion = gr.Accordion(label="Weitere Eingaben...", open=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 652 |
|
| 653 |
######################################
|
| 654 |
# Events und Übergabe Werte an Funktionen
|
|
|
|
| 661 |
inputs=[
|
| 662 |
user_question,
|
| 663 |
upload,
|
| 664 |
+
chatbot_bild,
|
| 665 |
history,
|
| 666 |
rag_option,
|
| 667 |
model_option,
|
|
|
|
| 684 |
|
| 685 |
# Chatbot (Tab 1): copy the typed text into the history, then run predict.
# Bug fix: add_text returns FOUR values (chatbot, history, prompt, "") —
# see its `return` statement above — but only three output components were
# listed, which makes Gradio fail on the arity mismatch. `history` is
# added in second position to match the return order.
transfer_input_args = dict(
    fn=add_text, inputs=[chatbot, history, user_input, upload], outputs=[chatbot, history, user_question, user_input], show_progress=True
)

predict_event1 = user_input.submit(**transfer_input_args, queue=False).then(**predict_args)
|
|
|
|
| 698 |
|
| 699 |
######################################
|
| 700 |
# Für Tab 2: Zeichnen
|
| 701 |
+
# Event wiring for Tab 2 (drawing with Stable Diffusion).
predict_args2 = dict(
    fn=generate_bild,
    # Bug fix: additional_inputs is itself a LIST of slider components;
    # nesting it as one element of `inputs` hands Gradio a list where a
    # component is expected. Concatenate to keep the inputs list flat.
    inputs=[user_question2, chatbot_bild] + additional_inputs,
    outputs=[chatbot_bild, status_display2], #[chatbot, history, status_display]
    show_progress=True,
)
# Bug fix: this dict was defined as `transfer_input_args`, which both
# clobbered Tab 1's dict of the same name and left the
# `**transfer_input_args2` references below unresolved (NameError).
transfer_input_args2 = dict(
    fn=add_text2, inputs=[chatbot_bild, user_input2], outputs=[chatbot_bild, user_question2, user_input2], show_progress=True
)
# Bug fix: use Tab 2's own components — the originals reused Tab 1's
# user_input / submitBtn / cancelBtn, so the drawing tab's buttons did
# nothing and Tab 1's were double-bound.
predict_event2_1 = user_input2.submit(**transfer_input_args2, queue=False).then(**predict_args2)
predict_event2_2 = submitBtn2.click(**transfer_input_args2, queue=False).then(**predict_args2)
#emptyBtn2.click(clear_all, [], [file_display, image_display])

cancelBtn2.click(
    cancels=[predict_event2_1, predict_event2_2]
)
|
| 721 |
|
| 722 |
|
| 723 |
demo.title = "LI-ChatBot"
|