import os

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# Authenticate with the Hugging Face Hub; the gated Mistral weights require
# a token, which is expected in the HF_TOKEN environment variable
login(token=os.environ["HF_TOKEN"])

model_id = "mistralai/Mistral-7B-Instruct-v0.2"

# Load tokenizer and model (fp16 weights, spread across available devices)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16,
)

# Prompt formatter: wrap the user turn in Mistral's [INST] ... [/INST] tags
def build_prompt(user_input):
    return f"[INST] {user_input.strip()} [/INST]"

# Chat function: tokenize, sample a completion, and strip the echoed prompt
def chat(user_input):
    prompt = build_prompt(user_input)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,
        temperature=0.7,
        top_p=0.95,
        top_k=50,
    )
    # decode() returns prompt + completion; keep only the text after [/INST]
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.split("[/INST]")[-1].strip()

# Gradio interface
demo = gr.Interface(fn=chat, inputs="text", outputs="text", title="Mistral AI Assistant")
demo.launch()
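Once the app is running (by default on http://127.0.0.1:7860), it can also be exercised programmatically. Below is a minimal sketch using gradio_client, assuming the app above is serving locally and exposes Gradio's default /predict endpoint for a single-function Interface:

# Client-side sanity check (assumes the app above is running locally on the
# default port; "/predict" is the default endpoint name for gr.Interface)
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
reply = client.predict("What is the capital of France?", api_name="/predict")
print(reply)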