import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from huggingface_hub import login
# Authenticate with the Hugging Face Hub (expects HF_TOKEN in the environment,
# e.g. as a Space secret) so the model weights can be downloaded
login(token=os.environ["HF_TOKEN"])

model_id = "mistralai/Mistral-7B-Instruct-v0.2"
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16,
)
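
# Optional: if GPU memory is tight, the same model could be loaded in 4-bit instead.
# This is only a sketch, not part of the original app; it assumes the bitsandbytes
# package is available in the environment.
#
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
# model = AutoModelForCausalLM.from_pretrained(
#     model_id,
#     device_map="auto",
#     quantization_config=bnb_config,
# )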

# Prompt formatter: wraps the user message in Mistral's [INST] ... [/INST] instruction format
def build_prompt(user_input):
    return f"<s>[INST] {user_input.strip()} [/INST]"
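
# An equivalent way to build the prompt is the tokenizer's built-in chat template.
# Shown as a hedged alternative only; the app above uses the manual format string.
#
# def build_prompt(user_input):
#     messages = [{"role": "user", "content": user_input.strip()}]
#     return tokenizer.apply_chat_template(
#         messages, tokenize=False, add_generation_prompt=True
#     )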

# Chat function: builds the prompt, samples a completion, and returns only the
# model's reply (the text after the final [/INST] tag)
def chat(user_input):
    prompt = build_prompt(user_input)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,
        temperature=0.7,
        top_p=0.95,
        top_k=50,
    )
    # The decoded output still contains the prompt, so keep only the text after [/INST]
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.split('[/INST]')[-1].strip()

# Gradio interface: a single text box in, plain text out
demo = gr.Interface(fn=chat, inputs="text", outputs="text", title="Mistral AI Assistant")
demo.launch()
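
# Optional: on a busy Space the request queue can smooth out concurrent users.
# A sketch assuming a Gradio version where Interface.queue() is available:
#
# demo.queue().launch()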