import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Replace with your actual Hugging Face username and model repo
model_id = "Scaryscar/Hackhaton"

# Load the tokenizer and model; use fp16 on GPU to halve memory, fp32 on CPU
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",  # automatically places the model on a GPU if one is available
)

# Inference function
def generate_answer(prompt, max_new_tokens=256, temperature=0.7, top_p=0.95):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),  # Gradio sliders pass floats; generate() expects an int
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens so the response doesn't echo the prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# Gradio UI
demo = gr.Interface(
    fn=generate_answer,
    inputs=[
        gr.Textbox(label="Enter your math problem or prompt here"),
        gr.Slider(50, 1024, value=256, step=1, label="Max New Tokens"),
        gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    outputs=gr.Textbox(label="WizardMath Response"),
    title="🧙 WizardMath: Fine-Tuned LLM",
    description="Ask WizardMath a math question or give it a reasoning prompt. The model has been fine-tuned for mathematical reasoning.",
    theme="default",
)

if __name__ == "__main__":
    demo.launch()
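
# Usage sketch (assumptions: the filename app.py, the port override, and the
# test prompt below are illustrative; defaults follow Gradio's documented behavior):
#
#   python app.py                  -> serves the UI locally at http://127.0.0.1:7860
#   demo.launch(share=True)        -> also creates a temporary public URL
#   demo.launch(server_port=8080)  -> pins the server to a specific port
#
# Quick smoke test without the UI (loads the model on import):
#   from app import generate_answer
#   print(generate_answer("What is 12 * 17?", max_new_tokens=64))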