Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,9 +1,13 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
|
|
|
| 3 |
|
| 4 |
client = InferenceClient(model="https://pl0u0mrj2ag4o0-80.proxy.runpod.net")
|
|
|
|
| 5 |
|
| 6 |
def inference(message, history):
|
|
|
|
|
|
|
| 7 |
partial_message = ""
|
| 8 |
for token in client.text_generation(message, max_new_tokens=1024, stream=True):
|
| 9 |
partial_message += token
|
|
@@ -12,12 +16,12 @@ def inference(message, history):
|
|
| 12 |
gr.ChatInterface(
|
| 13 |
inference,
|
| 14 |
chatbot=gr.Chatbot(height=300),
|
| 15 |
-
textbox=gr.Textbox(placeholder="
|
| 16 |
-
description="
|
| 17 |
-
title="
|
| 18 |
-
examples=["
|
| 19 |
-
retry_btn="
|
| 20 |
-
undo_btn="
|
| 21 |
-
clear_btn="
|
| 22 |
-
submit_btn="
|
| 23 |
).queue().launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
+
from googletrans import Translator
|
| 4 |
|
| 5 |
# Remote text-generation backend: a TGI-style endpoint exposed through a
# RunPod proxy URL (no auth token — the proxy handles access).
client = InferenceClient(model="https://pl0u0mrj2ag4o0-80.proxy.runpod.net")

# Translator used to normalize incoming user messages to English before
# they are sent to the model.
translator = Translator()
|
| 7 |
|
| 8 |
def inference(message, history):
|
| 9 |
+
# Translate to English
|
| 10 |
+
message = translator.translate(message, dest="en").text
|
| 11 |
partial_message = ""
|
| 12 |
for token in client.text_generation(message, max_new_tokens=1024, stream=True):
|
| 13 |
partial_message += token
|
|
|
|
| 16 |
# Chat UI wiring: stream responses from `inference` (defined above) into a
# gradio ChatInterface, then start the app with request queuing enabled.
# Fix: the example questions previously misspelled the product name as
# "SequioaDB"; corrected to "SequoiaDB" to match the title.
gr.ChatInterface(
    inference,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Please ask your question here...", container=False, scale=7),
    description="This is a chatbot trained on the Llama2-7b model.",
    title="SequoiaDB AI",
    examples=["What is SequoiaDB?", "What is SequoiaDB's license?", "What is SequoiaDB's official website?"],
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Submit",
).queue().launch()
|