# Hugging Face Space: Gradio playground for Hugging Face Inference API models.
import json
import os

import gradio as gr
import requests

# Optional HF API token; leave empty to use the free-tier Inference API.
# setdefault (not plain assignment) so a key already supplied via the
# environment is not clobbered with an empty string.
os.environ.setdefault("HF_API_KEY", "")  # Set your HF API key here if needed
# Define API endpoints
# Base URL of the Hugging Face Inference API; the model id is appended per request.
HF_INFERENCE_ENDPOINT = "https://api-inference.huggingface.co/models/"

# Define available models with their capabilities.
# Maps a task key (also used as the Gradio dropdown value and as the `task`
# argument to the API helpers) to display metadata and the HF model id queried.
AVAILABLE_MODELS: dict[str, dict[str, str]] = {
    "text-generation": {
        "name": "Text Generation",
        "model_id": "google/gemma-2b-it",
        "description": "Generate text responses to prompts."
    },
    "summarization": {
        "name": "Text Summarization",
        "model_id": "facebook/bart-large-cnn",
        "description": "Summarize long texts into shorter versions."
    },
    "translation": {
        "name": "Translation (English to French)",
        "model_id": "Helsinki-NLP/opus-mt-en-fr",
        "description": "Translate English text to French."
    },
    "question-answering": {
        "name": "Question Answering",
        "model_id": "deepset/roberta-base-squad2",
        "description": "Answer questions based on provided context."
    },
    "text-classification": {
        "name": "Sentiment Analysis",
        "model_id": "distilbert-base-uncased-finetuned-sst-2-english",
        "description": "Analyze sentiment of text (positive/negative)."
    },
    "image-to-text": {
        "name": "Image Captioning",
        "model_id": "Salesforce/blip-image-captioning-base",
        "description": "Generate captions for images."
    }
}
def query_huggingface_api(model_id, inputs, task="text-generation", api_key=None):
    """POST `inputs` to the HF Inference API for `model_id` and return the parsed JSON.

    Args:
        model_id: HF model repo id, appended to HF_INFERENCE_ENDPOINT.
        inputs: Prompt string, or a dict for question-answering.
        task: Task key; only "question-answering" changes the payload shape.
        api_key: Optional bearer token; omitted from headers when falsy.

    Returns:
        The decoded JSON response on HTTP 200, otherwise a dict with
        "error" (and "message" for HTTP errors) describing the failure.
    """
    headers = {"Content-Type": "application/json"}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"

    # Default payload shape; the QA pipeline instead expects
    # {"question": ..., "context": ...} at the top level.
    payload = {"inputs": inputs}
    if task == "question-answering" and isinstance(inputs, dict):
        payload = inputs

    try:
        response = requests.post(
            f"{HF_INFERENCE_ENDPOINT}{model_id}",
            headers=headers,
            json=payload,
            timeout=60,  # without a timeout, requests can hang indefinitely
        )
        if response.status_code == 200:
            return response.json()
        return {"error": f"API Error: {response.status_code}", "message": response.text}
    except Exception as e:
        # Broad on purpose: callers render this dict rather than crashing the UI.
        return {"error": f"Request Error: {str(e)}"}
def process_result(result, task):
    """Format a raw Inference API response into a human-readable string.

    Args:
        result: Decoded JSON from the API, or an error dict produced by
            query_huggingface_api ({"error": ..., "message": ...}).
        task: Task key used to pick the response shape to unpack.

    Returns:
        A display string; unknown tasks/shapes fall back to str(result),
        and any formatting failure is reported rather than raised.
    """
    if isinstance(result, dict) and "error" in result:
        return f"Error: {result.get('error')}\n{result.get('message', '')}"

    # Tasks whose response is a list of dicts carrying one text field.
    text_keys = {
        "text-generation": "generated_text",
        "summarization": "summary_text",
        "translation": "translation_text",
        "image-to-text": "generated_text",
    }

    try:
        if task in text_keys:
            if isinstance(result, list) and result:
                return result[0].get(text_keys[task], str(result))
            return str(result)

        if task == "text-classification":
            if isinstance(result, list) and result:
                # The API may return [[{label, score}, ...]] or a flat
                # [{label, score}, ...]; handle both shapes.
                items = result[0] if isinstance(result[0], list) else result
                return "\n".join(
                    f"{item.get('label', '')}: {item.get('score', 0):.4f}"
                    for item in items
                )
            return str(result)

        if task == "question-answering":
            if isinstance(result, dict):
                answer = result.get("answer", "No answer found")
                score = result.get("score", 0)
                return f"Answer: {answer}\nConfidence: {score:.4f}"
            return str(result)

        # Unknown task: show the raw payload.
        return str(result)
    except Exception as e:
        return f"Error processing result: {str(e)}\nRaw result: {str(result)}"
def run_task(task_name, inputs, context=None, image=None):
    """Dispatch the selected task to the Inference API and format the output.

    Args:
        task_name: Key into AVAILABLE_MODELS.
        inputs: Primary text input (question text for question-answering).
        context: Context passage, used only for question-answering.
        image: Uploaded image path, used only for image-to-text.

    Returns:
        A display string: the formatted model output or an error message.
    """
    if task_name not in AVAILABLE_MODELS:
        return "Unknown task selected. Please choose from the available options."

    model_id = AVAILABLE_MODELS[task_name]["model_id"]
    api_key = os.environ.get("HF_API_KEY", "")

    try:
        # Task-specific input shaping.
        if task_name == "question-answering" and context:
            inputs = {"question": inputs, "context": context}
        elif task_name == "image-to-text" and image:
            # Direct image upload is not wired through this simple version.
            return "Image upload not supported in this version. Please use a URL to an image instead."

        raw = query_huggingface_api(model_id, inputs, task_name, api_key)
        return process_result(raw, task_name)
    except Exception as e:
        return f"Error: {str(e)}"
# Create Gradio Interface
with gr.Blocks(title="Hugging Face Models Playground") as demo:
    gr.Markdown("# 🤗 Hugging Face Models Playground")
    gr.Markdown("Access Hugging Face models through their Inference API - no local installation needed!")

    task_dropdown = gr.Dropdown(
        choices=list(AVAILABLE_MODELS.keys()),
        value="text-generation",
        label="Select Task"
    )

    # Display model information
    model_info = gr.Markdown("## Task Description\nGenerate text responses to prompts.")

    def update_model_info(task_name):
        """Return markdown describing the selected task's model."""
        if task_name in AVAILABLE_MODELS:
            info = AVAILABLE_MODELS[task_name]
            return f"## {info['name']}\n**Model:** {info['model_id']}\n\n{info['description']}"
        return "Select a task to see details"

    task_dropdown.change(fn=update_model_info, inputs=task_dropdown, outputs=model_info)

    # Create specialized input fields per task
    with gr.Group():
        # Primary text input
        text_input = gr.Textbox(
            label="Input Text",
            placeholder="Enter your text here...",
            lines=3
        )
        # Context for QA (hidden unless question-answering is selected)
        context_input = gr.Textbox(
            label="Context (for Question Answering)",
            placeholder="Enter the context text here...",
            lines=5,
            visible=False
        )
        # Image input for image tasks (hidden unless image-to-text is selected)
        image_input = gr.Image(
            label="Image Input (for image tasks)",
            type="filepath",
            visible=False
        )

    def update_input_visibility(task_name):
        """Relabel the main input and toggle task-specific widgets."""
        show_context = task_name == "question-answering"
        show_image = task_name == "image-to-text"
        input_label = "Question" if show_context else "Input Text"
        input_placeholder = {
            "text-generation": "Enter your prompt here...",
            "summarization": "Enter text to summarize...",
            "translation": "Enter English text to translate to French...",
            "question-answering": "Enter your question here...",
            "text-classification": "Enter text for sentiment analysis...",
            "image-to-text": "Enter image URL or upload an image..."
        }.get(task_name, "Enter your text here...")
        # gr.update(...) works on both Gradio 3.x and 4.x; the per-component
        # gr.Textbox.update / gr.Image.update methods were removed in Gradio 4.
        return [
            gr.update(label=input_label, placeholder=input_placeholder),
            gr.update(visible=show_context),
            gr.update(visible=show_image)
        ]

    task_dropdown.change(
        fn=update_input_visibility,
        inputs=task_dropdown,
        outputs=[text_input, context_input, image_input]
    )

    submit_btn = gr.Button("Run Model", variant="primary")
    output_box = gr.Textbox(label="Model Output", lines=10)

    # Connect the interface
    submit_btn.click(
        fn=run_task,
        inputs=[task_dropdown, text_input, context_input, image_input],
        outputs=output_box
    )

# Launch the interface (0.0.0.0:7860 is the standard HF Spaces binding)
demo.launch(server_name="0.0.0.0", server_port=7860)