import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned VakilAI model and its tokenizer
model_id = "devNaam/vakilai-llama32-3b-v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

def generate_response(prompt):
    # Tokenize the prompt and move it to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,  # required for temperature/top_p to take effect
        temperature=0.7,
        top_p=0.9,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    return response

# Simple Gradio UI wrapping the generation function
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, placeholder="Ask VakilAI a legal question..."),
    outputs="text",
    title="VakilAI Legal Assistant",
    description="AI legal assistant trained on Indian legal data.",
)

iface.launch()