import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer once at startup
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    torch_dtype=torch.float16,
)


def generate(prompt):
    # Wrap the raw prompt in the model's chat template so the
    # instruct-tuned model sees the [INST] formatting it was trained on.
    messages = [{"role": "user", "content": prompt}]
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    with torch.no_grad():
        outputs = model.generate(inputs, max_new_tokens=300)
    # Slice off the prompt tokens so the textbox shows only the model's
    # reply rather than an echo of the input followed by the reply.
    new_tokens = outputs[0][inputs.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


# Gradio interface
iface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=5, placeholder="Enter your prompt here"),
    outputs=gr.Textbox(label="AI Response"),
)

iface.launch(server_name="0.0.0.0", server_port=7860)
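
# ---------------------------------------------------------------------------
# Usage sketch (assumptions: the app is running and reachable at
# http://localhost:7860, and the `gradio_client` package is installed).
# A gr.Interface exposes its function under the default endpoint name
# "/predict", so the running app can be queried from a separate process:
#
#   from gradio_client import Client
#
#   client = Client("http://localhost:7860/")
#   reply = client.predict(
#       "Explain beam search in one paragraph.",
#       api_name="/predict",
#   )
#   print(reply)
# ---------------------------------------------------------------------------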