Spaces:
Running
Running
import gradio as gr
import torch
from huggingface_hub import hf_hub_download

from inference import predict

# Pull just the weights file from the Space's model repo.
# hf_hub_download caches under ~/.cache/huggingface, so restarts reuse it.
model_path = hf_hub_download(
    repo_id="PRUTHVIn/vqa_project",
    filename="weights/vqa_model.pth",
)

# NOTE(review): model_path is downloaded but never loaded in this file —
# presumably inference.predict loads the weights itself; confirm, otherwise
# this download is dead code. Example load:
# model.load_state_dict(torch.load(model_path, map_location="cpu"))
def vqa_interface(image, question):
    """Gradio callback: answer a natural-language question about an image.

    Parameters
    ----------
    image : str | None
        Filepath of the uploaded image (the gr.Image component uses
        type="filepath"), or None when nothing was uploaded.
    question : str | None
        The user's question; Gradio may deliver None or a blank string.

    Returns
    -------
    str
        The model's answer, a prompt asking for both inputs, or an
        "Error: ..." message if inference fails.
    """
    # Guard before touching the strings: Gradio can hand us None for the
    # textbox, and None.strip() would otherwise raise AttributeError and
    # surface as a confusing "Error: ..." instead of this polite prompt.
    if image is None or not question or not question.strip():
        return "Please upload an image and enter a question."
    try:
        # predict() lives in inference.py and runs the VQA model.
        return predict(image, question)
    except Exception as e:
        # Surface the failure in the UI rather than crashing the Space;
        # also log to the console for debugging.
        print("ERROR:", str(e))
        return f"Error: {str(e)}"
# Build the UI components up front, then wire them into the Interface.
image_input = gr.Image(type="filepath", label="Upload Image")
question_input = gr.Textbox(
    label="Ask a Question",
    placeholder="e.g. What is in the image?",
)
answer_output = gr.Textbox(label="Answer")

iface = gr.Interface(
    fn=vqa_interface,
    inputs=[image_input, question_input],
    outputs=answer_output,
    title="🧠 Smart Visual Question Answering System",
    theme="soft",
)

if __name__ == "__main__":
    # Start the local web server only when run as a script
    # (not when imported, e.g. by the Spaces runtime).
    iface.launch()