Update
app.py CHANGED
@@ -62,15 +62,32 @@
 
 
 
-import
-
-
-
-
-
-
-
-
-
-
+import gradio as gr
+from huggingface_hub import InferenceClient
+
+# Initialize the Hugging Face Inference Client
+client = InferenceClient(
+    "meta-llama/Meta-Llama-3.1-8B-Instruct",
+    # token=  # Replace with your actual token
+)
+
+# Define a function to handle the chat input and get a response from the model
+def chat_with_model(user_input, history):
+    # Stream the model's reply and accumulate it into a single string
+    response = ""
+    for message in client.chat_completion(
+        messages=[{"role": "user", "content": user_input}],
+        max_tokens=500,
+        stream=True,
+    ):
+        response += message.choices[0].delta.content or ""  # final chunk may carry no content
+    return history + [(user_input, response)]  # gr.Chatbot expects (user, assistant) pairs
+
+# Create a Gradio interface with a chat component
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    with gr.Row():
+        txt = gr.Textbox(show_label=False, placeholder="Type your message here...")
+    txt.submit(chat_with_model, inputs=[txt, chatbot], outputs=chatbot)
+
+demo.launch()
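
For quick verification outside the Space, the streaming chat_completion call added above can be exercised on its own. A minimal sketch, assuming a valid Hugging Face token is available to the client (e.g. via the HF_TOKEN environment variable or a prior huggingface-cli login); the model name and parameters mirror the diff, while the prompt string is illustrative:

from huggingface_hub import InferenceClient

# Same model as in app.py; gated models require an accepted license and a valid token
client = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct")

# Stream the reply chunk by chunk and print it as it arrives
for chunk in client.chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=50,
    stream=True,
):
    print(chunk.choices[0].delta.content or "", end="", flush=True)
print()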