Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,16 +1,21 @@
|
|
# app.py
import os

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.1"

# BUG FIX: HF_TOKEN was referenced below but never defined, so the module
# crashed with a NameError on startup. Read it from the environment instead
# (add it as a secret named HF_TOKEN in the Space settings). os.getenv
# returns None when unset, which from_pretrained accepts for public repos.
HF_TOKEN = os.getenv("HF_TOKEN")

# Load model & tokenizer. The token is forwarded to BOTH loads — the Mistral
# repo is presumably gated (why a token is used at all) — TODO confirm.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    # Half precision only when a CUDA device is available; CPU stays float32.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
    token=HF_TOKEN,
)
|
| 15 |
|
| 16 |
def chat_with_mistral(prompt, history=None):
|
|
@@ -39,8 +44,8 @@ def chat_with_mistral(prompt, history=None):
|
|
| 39 |
return answer, history
|
| 40 |
|
| 41 |
|
| 42 |
-
with gr.Blocks(title="Mistral
|
| 43 |
-
gr.Markdown("# π¦ Mistral Chatbot (
|
| 44 |
chatbot = gr.Chatbot()
|
| 45 |
msg = gr.Textbox(label="Your message")
|
| 46 |
clear = gr.Button("Clear")
|
|
@@ -48,12 +53,11 @@ with gr.Blocks(title="Mistral Chat (No API Key)") as app:
|
|
| 48 |
conversation_state = gr.State([])
|
| 49 |
|
| 50 |
def handle_user_message(user_message, history):
|
| 51 |
-
# call model
|
| 52 |
answer, new_history = chat_with_mistral(user_message, history)
|
| 53 |
return "", new_history
|
| 54 |
|
| 55 |
msg.submit(handle_user_message, [msg, conversation_state], [msg, chatbot, conversation_state])
|
| 56 |
-
clear.click(lambda: ([], []), None, [chatbot, conversation_state])
|
| 57 |
|
| 58 |
if __name__ == "__main__":
|
| 59 |
app.launch()
|
|
|
|
# app.py
import os

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.1"

# Optional: the access token comes from the environment — make sure the
# HF_TOKEN secret is configured in the Space; None is fine for public repos.
HF_TOKEN = os.getenv("HF_TOKEN")

# Use half precision only when a CUDA device is present; CPUs get float32.
_DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32

# Load tokenizer and model, forwarding the token to both calls
# (needed when the model repo is gated — TODO confirm for this repo).
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=_DTYPE,
    device_map="auto",
    token=HF_TOKEN,
)
|
| 20 |
|
| 21 |
def chat_with_mistral(prompt, history=None):
|
|
|
|
| 44 |
return answer, history
|
| 45 |
|
| 46 |
|
with gr.Blocks(title="Mistral Chatbot") as app:
    gr.Markdown("# π¦ Mistral Chatbot (with HF Token)")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Your message")
    clear = gr.Button("Clear")

    # Holds the running conversation across turns.
    conversation_state = gr.State([])

    def handle_user_message(user_message, history):
        """Run one chat turn: clear the textbox and refresh chat + state."""
        answer, new_history = chat_with_mistral(user_message, history)
        # BUG FIX: msg.submit below declares THREE outputs
        # (msg, chatbot, conversation_state) but this function returned only
        # TWO values, so Gradio raised at event time and the State was never
        # updated (history lost every turn). Return the history twice: once
        # for the Chatbot display, once for the State.
        # NOTE(review): assumes chat_with_mistral's history is in the
        # (user, assistant) pair format gr.Chatbot expects — confirm against
        # its body (not visible here).
        return "", new_history, new_history

    msg.submit(handle_user_message, [msg, conversation_state], [msg, chatbot, conversation_state])
    clear.click(lambda: ([], []), inputs=None, outputs=[chatbot, conversation_state])

if __name__ == "__main__":
    app.launch()
|