from fastapi import FastAPI, Form
import uvicorn

from model import LogisticRegressionModel
from helper import get_llm, classification_modeL_cache, llm_model_cache, prompt

app = FastAPI()


# Route decorators were lost in extraction; the paths below are assumed.
@app.get("/")
async def root():
    return {"message": "Sentiment Analysis API is running."}


@app.get("/load_log")
async def load_log():
    return {"message": "hello"}


@app.post("/chat")
async def chat_endpoint(message: str = Form(...)):
    # Lazily create and cache the logistic regression classifier and the LLM
    # so they are only loaded on the first request.
    if "model" not in classification_modeL_cache:
        classification_modeL_cache["model"] = LogisticRegressionModel()
    if "llm" not in llm_model_cache:
        llm_model_cache["llm"] = get_llm()

    # Classify the message, then ask the LLM to explain the prediction.
    prediction = classification_modeL_cache["model"].predict(message)
    result = llm_model_cache["llm"].invoke(
        prompt.format(text=message,
                      positive_prob=prediction[0][1],
                      negative_prob=prediction[0][0]))
    if result:
        return result.content


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=7861)
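
Once the server is running, the endpoint can be smoke-tested with a small client script. A minimal sketch, assuming the /chat route reconstructed above (its path is an assumption, not confirmed by the original) and that the helper caches and model files are available:

# Minimal client sketch; assumes the API is running locally on port 7861
# and that the assumed /chat route accepts form-encoded data.
import requests

resp = requests.post(
    "http://127.0.0.1:7861/chat",
    data={"message": "The battery life on this phone is fantastic."},
)
resp.raise_for_status()
print(resp.text)  # LLM-generated explanation of the predicted sentiment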