# mistral-api / app.py
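# Minimal Gradio chat demo: keeps a global chat history, formats it with the
# Mistral instruct chat template, and generates replies with transformers.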
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
model_id = "mistralai/Mistral-7B-Instruct-v0.1"
# `token=True` replaces the deprecated `use_auth_token=True`; it reuses the local
# Hugging Face login, which is only needed if the model repo is gated.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=True)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
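# device_map="auto" requires the `accelerate` package; the fp16 weights of a
# 7B model take roughly 14-15 GB of GPU memory.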
chat_history = []  # list of (user, assistant) turns
def chat(user_input):
    global chat_history
    # Mistral-7B-Instruct-v0.1's chat template only accepts alternating
    # user/assistant roles (no "system" role), so the instruction is folded
    # into the first user turn instead.
    messages = []
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    first_turn = "You are a helpful assistant.\n\n" if not messages else ""
    messages.append({"role": "user", "content": first_turn + user_input})
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # pad_token_id is set explicitly because the Mistral tokenizer defines no pad token.
    output = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens; splitting the full decoded text
    # on the user message is fragile when the model echoes it.
    answer = tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True).strip()
    chat_history.append((user_input, answer))
    return answer
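# Note: gr.Interface with plain text in/out shows only the latest reply. Gradio's
# gr.ChatInterface would render the running conversation, but it calls fn(message,
# history), so chat() would need an extra history parameter to use it.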
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Mistral 7B Chat")
iface.launch()