import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "sakthi54321/power_ai"

# Load model + tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)


# Simple function: one input → one output
def ask_model(prompt):
    # Frame the prompt as Q&A to encourage a direct answer
    input_text = f"Question: {prompt}\nAnswer:"
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=800,  # upper bound on answer length
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Only keep the text after the last "Answer:" marker
    if "Answer:" in response:
        response = response.split("Answer:")[-1].strip()
    return response


# Gradio UI (straightforward)
demo = gr.Interface(
    fn=ask_model,
    inputs=gr.Textbox(label="Ask something", placeholder="Type your question here..."),
    outputs=gr.Textbox(label="Model Response"),
    title="🤖 Power AI",
    description="Straightforward Q&A with your trained model",
)

demo.launch()