Hugging Face Space status: Sleeping
import logging
import os
import random
import re
import sys
import tempfile
from typing import List, Optional

import torch
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel
| # Configure logging | |
| logging.basicConfig( | |
| level=logging.INFO, | |
| format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', | |
| handlers=[logging.StreamHandler(sys.stdout)] | |
| ) | |
| logger = logging.getLogger(__name__) | |
| app = FastAPI(title="Chat API", description="Simple chat API for Hugging Face Space") | |
| # Use the system's temporary directory which should be writable | |
| temp_dir = tempfile.mkdtemp() | |
| os.environ["TRANSFORMERS_CACHE"] = temp_dir | |
| os.environ["HF_HOME"] = temp_dir | |
| logger.info(f"Using temporary directory: {temp_dir}") | |
| # Pydantic models for request/response | |
| class ChatTurn(BaseModel): | |
| user: Optional[str] = None | |
| assistant: Optional[str] = None | |
| class ChatRequest(BaseModel): | |
| message: str | |
| history: Optional[List[ChatTurn]] = [] | |
| class ChatResponse(BaseModel): | |
| response: str | |
| # Fallback responses | |
| FALLBACK_RESPONSES = [ | |
| "I apologize, but I'm currently having trouble processing your request.", | |
| "Sorry, I'm experiencing technical difficulties at the moment.", | |
| "I'm unable to generate a proper response right now. Please try again later.", | |
| "My language model is temporarily unavailable. Please check back soon.", | |
| "I would like to help, but I'm having some technical issues. Please try again shortly." | |
| ] | |
| # Error handler | |
| async def generic_exception_handler(request: Request, exc: Exception): | |
| logger.error(f"Unhandled exception: {str(exc)}", exc_info=True) | |
| return JSONResponse( | |
| status_code=500, | |
| content={"detail": f"Internal server error: {str(exc)}"} | |
| ) | |
| # Simple text-only route | |
| async def root(): | |
| return {"message": "Chat API is running. Use /api/chat for chat functionality."} | |
| # Chat endpoint - just use fallback responses for now | |
| async def chat(request: ChatRequest): | |
| logger.info(f"Received chat request: {request.message[:50]}...") | |
| # Select a random fallback response | |
| fallback = random.choice(FALLBACK_RESPONSES) | |
| # Add a bit of personalization | |
| if "hello" in request.message.lower() or "hi" in request.message.lower(): | |
| fallback = "Hello! " + fallback | |
| elif "help" in request.message.lower(): | |
| fallback = "I'd like to help you with that, but " + fallback.lower() | |
| logger.info(f"Returning fallback response") | |
| return ChatResponse(response=fallback) | |
| async def health_check(): | |
| return { | |
| "status": "ok", | |
| "system_info": { | |
| "device": "cpu", # No GPU for now | |
| "temp_dir": temp_dir, | |
| "pwd": os.getcwd(), | |
| "user": os.getenv("USER", "unknown"), | |
| "writable_temp": os.access(temp_dir, os.W_OK), | |
| "writable_cwd": os.access(os.getcwd(), os.W_OK) | |
| } | |
| } | |
| if __name__ == "__main__": | |
| port = int(os.environ.get("PORT", 7860)) | |
| uvicorn.run("app:app", host="0.0.0.0", port=port, reload=False) |