# Hugging Face Space: text emotion analysis (FastAPI + Gradio + transformers).
import json
import os
import tempfile
from typing import Dict, List, Optional

import gradio as gr
import torch
from fastapi import FastAPI, File, HTTPException, Request, UploadFile
from transformers import (
    RobertaForSequenceClassification,
    RobertaTokenizer,
    pipeline,
)
# --- Model setup -----------------------------------------------------------
# CardiffNLP RoBERTa fine-tuned for emotion classification on tweets.
model_name = "cardiffnlp/twitter-roberta-base-emotion"
tokenizer = RobertaTokenizer.from_pretrained(model_name)
model = RobertaForSequenceClassification.from_pretrained(model_name)

# top_k=None yields a score for every label (modern replacement for the
# deprecated return_all_scores=True).
emotion_analysis = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    top_k=None,
)

app = FastAPI()
def save_upload_file(upload_file: "UploadFile") -> str:
    """Persist an uploaded file to a temporary location and return its path.

    The temp file keeps the upload's original extension so callers can branch
    on it (e.g. ``.json`` vs ``.txt``). The caller is responsible for deleting
    the returned file.

    Args:
        upload_file: object exposing ``.filename`` and a binary ``.file``
            stream (FastAPI's ``UploadFile`` contract).

    Returns:
        Path of the newly written temporary file.
    """
    try:
        # Preserve the extension for later content-type dispatch.
        suffix = os.path.splitext(upload_file.filename)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            # Write the raw bytes straight through. The previous
            # decode-then-re-encode round trip for .json was a no-op for valid
            # UTF-8 and raised UnicodeDecodeError for anything else.
            tmp.write(upload_file.file.read())
            return tmp.name
    finally:
        # Release the upload stream whether or not the write succeeded.
        upload_file.file.close()
@app.post("/predict")  # was never registered with FastAPI despite the docstring
async def predict_from_upload(file: UploadFile = File(...)):
    """FastAPI endpoint: classify emotions in an uploaded .txt or .json file.

    For ``.json`` uploads the text is taken from the ``description`` key;
    any other upload is read as plain text.

    Returns:
        ``{"success": True, "results": [{"label": ..., "score": ...}, ...]}``
        sorted by descending score.

    Raises:
        HTTPException: 400 when no text content is found, 500 on other errors.
    """
    temp_path = None
    try:
        temp_path = save_upload_file(file)
        if temp_path.endswith('.json'):
            with open(temp_path, 'r') as f:
                data = json.load(f)
            text = data.get('description', '')
        else:
            with open(temp_path, 'r') as f:
                text = f.read()
        if not text.strip():
            raise HTTPException(status_code=400, detail="No text content found")
        result = emotion_analysis(text)
        emotions = [{'label': e['label'], 'score': float(e['score'])}
                    for e in sorted(result[0], key=lambda x: x['score'], reverse=True)]
        return {"success": True, "results": emotions}
    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 400 above) propagate unchanged
        # instead of being re-wrapped as 500s by the handler below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always remove the temp copy, on success and failure alike.
        if temp_path and os.path.exists(temp_path):
            os.unlink(temp_path)
def gradio_predict(input_data, file_data=None):
    """Gradio handler: classify emotions from direct text or an uploaded file.

    ``file_data`` takes precedence over ``input_data``. Gradio's File
    component supplies a local file path (a ``str`` in Gradio 4.x, or an
    object with a ``.name`` attribute in older versions) — NOT a FastAPI
    ``UploadFile`` — so the file is read in place. The previous code passed it
    to ``save_upload_file``, which requires ``.filename``/``.file`` and
    therefore crashed on every file upload.

    Returns:
        ``{"emotions": [{label: score}, ...]}`` sorted by descending score,
        or ``{"error": message}`` on failure.
    """
    try:
        if file_data is not None:
            # Normalize the two shapes Gradio may hand us to a plain path.
            path = file_data if isinstance(file_data, str) else file_data.name
            if path.endswith('.json'):
                with open(path, 'r') as f:
                    text = json.load(f).get('description', '')
            else:
                with open(path, 'r') as f:
                    text = f.read()
            # Note: no os.unlink here — the file belongs to Gradio's cache.
        else:
            text = input_data
        # Guard against None (empty textbox) as well as whitespace-only input.
        if not text or not text.strip():
            return {"error": "No text content found"}
        result = emotion_analysis(text)
        return {
            "emotions": [
                {e['label']: float(e['score'])}
                for e in sorted(result[0], key=lambda x: x['score'], reverse=True)
            ]
        }
    except Exception as e:
        return {"error": str(e)}
# --- Gradio UI (no example inputs) -----------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Text Emotion Analysis")
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(label="Enter text directly", lines=5)
            file_input = gr.File(label="Or upload file", file_types=[".txt", ".json"])
            submit_btn = gr.Button("Analyze")
        with gr.Column():
            output = gr.JSON(label="Results")
    submit_btn.click(
        fn=gradio_predict,
        inputs=[text_input, file_input],
        outputs=output,
        api_name="predict",
    )

# Serve the Gradio UI from the FastAPI app at the site root.
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)