# Question Answering demo — Hugging Face Space (app.py)
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face token is read from the environment; on a Space, set it in
# Settings -> Secrets so it is injected as an env var at startup.
HF_TOKEN = os.getenv("HF_TOKEN")

# Client for the hosted inference API, authenticated with the token above.
# NOTE: HF_TOKEN may be None here; answer_question() guards against that
# before any request is made.
client = InferenceClient(
    provider="hf-inference",
    api_key=HF_TOKEN,
)
def answer_question(question: str, context: str) -> str:
    """Answer *question* by extracting a span from *context*.

    Uses the deepset/roberta-base-squad2 extractive QA model via the
    Hugging Face inference API.

    Args:
        question: The question to answer.
        context: The passage the answer must be extracted from.

    Returns:
        The extracted answer string, or a human-readable error message
        (missing token or API failure) — never raises to the UI.
    """
    # Fail fast with a clear message when the Space secret is missing.
    if not HF_TOKEN:
        return "HF_TOKEN not found. Please set it in the environment variables."
    try:
        result = client.question_answering(
            question=question,
            context=context,
            model="deepset/roberta-base-squad2",
        )
        return result["answer"]
    except Exception as e:
        # Surface network/API errors in the output box instead of crashing.
        return f"Error: {str(e)}"
# Gradio UI: two text inputs (question + multi-line context), one text output.
iface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(label="Question"),
        gr.Textbox(label="Context", lines=5),
    ],
    outputs="text",
    title="Question Answering with RoBERTa",
    description="Enter a question and context. The model will find the answer from the context.",
)

# Launch only when executed as a script — this is how a Space starts the app.
if __name__ == "__main__":
    iface.launch()