# (Hugging Face Spaces status banner captured by the page scrape: "Spaces: Sleeping")
from fastapi import FastAPI, Request, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import uvicorn
import os
import logging

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model ID and access token
model_id = "metkid/type-laporan-damkar-classify"
hf_token = os.getenv("HF_TOKEN")  # Read from Hugging Face Secrets in Spaces

# Initialize FastAPI
app = FastAPI(
    title="Damkar Classification API",
    description="API untuk klasifikasi tipe laporan damkar",
    version="1.0.0"
)

# Globals holding the model artifacts, populated by the startup hook below.
tokenizer = None
model = None
pipe = None
# Register as a startup hook — without this decorator the function is never
# called and `pipe` stays None, making every /predict return 503.
@app.on_event("startup")
async def load_model():
    """Load tokenizer/model and build the classification pipeline at app startup.

    Reads module globals ``model_id`` and ``hf_token``; writes the globals
    ``tokenizer``, ``model`` and ``pipe``.

    Raises:
        Exception: re-raised after logging if loading fails, so the Space
            startup fails visibly instead of serving a broken app.
    """
    global tokenizer, model, pipe
    try:
        logger.info("Loading model...")
        # Use the token when available (private model); fall back to
        # anonymous access in case the model is public.
        if hf_token:
            tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
            model = AutoModelForSequenceClassification.from_pretrained(model_id, token=hf_token)
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_id)
            model = AutoModelForSequenceClassification.from_pretrained(model_id)
        # Build the inference pipeline used by the /predict endpoint.
        pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
        logger.info("Model loaded successfully!")
    except Exception as e:
        logger.error(f"Error loading model: {str(e)}")
        # Bare `raise` keeps the original traceback intact (vs. `raise e`).
        raise
# Request body schema
class InputText(BaseModel):
    """Payload for the prediction endpoint: the report text to classify."""
    # text: the raw report text; emptiness is validated in the endpoint itself
    text: str
class PredictionResponse(BaseModel):
    """Response schema: predicted label plus its confidence score."""
    # label/score mirror the transformers text-classification pipeline output
    label: str
    score: float
    status: str = "success"
# Root endpoint for a basic health check — the route decorator was missing,
# leaving the endpoint unregistered and unreachable.
@app.get("/")
def read_root():
    """Describe the service and list the available endpoints."""
    return {
        "message": "Damkar Classification API is running!",
        "status": "healthy",
        "endpoints": {
            "predict": "/predict",
            "docs": "/docs",
            "health": "/health"
        }
    }
# Health check endpoint — path "/health" matches the listing in read_root.
@app.get("/health")
def health_check():
    """Report whether the classification pipeline has finished loading."""
    # `pipe` is only read here, so no `global` declaration is needed.
    if pipe is None:
        return {"status": "unhealthy", "message": "Model not loaded"}
    return {"status": "healthy", "message": "Model is ready"}
# Prediction endpoint — path "/predict" matches the listing in read_root;
# POST because it consumes a JSON request body.
@app.post("/predict", response_model=PredictionResponse)
def predict(input: InputText):
    """Classify a damkar report text and return its label and score.

    Raises:
        HTTPException: 503 if the model pipeline is not loaded yet,
            400 if the input text is empty/whitespace,
            500 if inference fails unexpectedly.
    """
    # Guard: the startup hook may not have completed (or failed).
    if pipe is None:
        raise HTTPException(status_code=503, detail="Model not loaded yet")
    # Validate input before spending time on inference.
    if not input.text or input.text.strip() == "":
        raise HTTPException(status_code=400, detail="Text input cannot be empty")
    try:
        # Pipeline returns a list of {"label": ..., "score": ...} dicts;
        # only the top prediction is returned to the client.
        result = pipe(input.text)
        return PredictionResponse(
            label=result[0]["label"],
            score=result[0]["score"]
        )
    except Exception as e:
        logger.error(f"Prediction error: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Prediction failed: {str(e)}")
# Simple connectivity-test endpoint.
# NOTE(review): the original route decorator was lost in the scrape; the
# "/test" path is assumed from the function name — confirm against clients.
@app.get("/test")
def test_endpoint():
    """Return a static message confirming the API is reachable."""
    return {"message": "Test endpoint is working!"}
# Local development entry point; port 7860 is the Hugging Face Spaces default.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)