# RAG+LLM FastAPI service — application entry point.
# NOTE: the original "Spaces: Running / Running" lines here were status-badge
# residue from a Hugging Face Spaces page scrape, not source code.
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from router import embedding_router
from router import gold_predict_model
from router import image_embedding_router
from router import llamindex_router
# Application object; exposed as "app" so uvicorn can target "app:app".
app = FastAPI(
    title="RAG+LLM",
    description="RAG+LLM",
)

# Permit browser clients from any origin to call the API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# maximally permissive — confirm this is intended outside development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount each feature router under its URL prefix (order preserved).
for _router, _prefix in (
    (llamindex_router.router, "/llama_index"),
    (embedding_router.router, "/embedding"),
    (image_embedding_router.router, "/image_embedding"),
    (gold_predict_model.router, "/gold_predict"),
):
    app.include_router(_router, prefix=_prefix)
# Bug fix: the original function was never registered as a route — without a
# path-operation decorator it is dead code and GET / returns 404.
@app.get("/")
def read_root():
    """Health-check endpoint.

    Returns:
        dict: a static status message confirming the API is up.
    """
    return {"message": "RAG+LLM API is running successfully."}
def _serve() -> None:
    # Dev-server launch; the "app:app" import string (not the object) is
    # required because reload=True re-imports the module in a subprocess.
    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)


if __name__ == "__main__":
    _serve()