| | |
| | |
| |
|
| | |
| | |
| |
|
| | |
| | |
| | |
| |
|
| |
|
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
| | |
| | |
| |
|
| | |
| |
|
| |
|
| | import json |
| | import os |
| | import traceback |
| |
|
| | import google.generativeai as genai |
| | from dotenv import load_dotenv |
| | from fastapi import FastAPI, HTTPException |
| | from pydantic import BaseModel, ValidationError |
| |
|
| | from agents_rag.crew import get_crew_response |
| | from assistants.assistant_v1 import gemini_rag_assistant |
| | from utils.knowledge_base import AgenticRAG |
| | from utils.vectorDB import VectorStore |
| |
|
# Load environment variables from a local .env file (if present).
load_dotenv()

# Configure the Gemini SDK with the API key from the environment.
# NOTE(review): if GEMINI_API_KEY is unset, os.getenv returns None and
# genai.configure is called with api_key=None — confirm this fails loudly
# at startup rather than at first request.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)

# FastAPI application instance; routes are registered below via decorators.
app = FastAPI(title="Gemini RAG Assistant API", version="1.0.0")
| |
|
| |
|
| | |
class QueryRequest(BaseModel):
    """Request payload for the /get-response endpoint."""

    query: str  # user question forwarded to the RAG pipeline
    is_uploaded: bool = False  # whether retrieval targets user-uploaded documents
    url: str  # passed through to get_crew_response; exact semantics not visible here — confirm with that module
| |
|
| |
|
| | |
class QueryResponse(BaseModel):
    """Response shape for query endpoints.

    NOTE(review): not currently enforced — no endpoint declares
    response_model=QueryResponse, and /get-response actually returns an
    extra "citations" key. Confirm whether this model is still intended.
    """

    response: str  # generated answer text
    context: dict  # retrieval context used to produce the answer
| |
|
| |
|
def llm_answer(query=""):
    """Generate a direct (non-RAG) answer for *query* using Gemini.

    Args:
        query: The prompt text sent verbatim to the model.

    Returns:
        dict with keys:
            response: generated text ("" on failure)
            status:   "success" or "fail"
    """
    try:
        model = genai.GenerativeModel(model_name="gemini-2.0-flash-exp")

        print("query", query)

        response = model.generate_content(query)
        print(response.text)

        return {"response": response.text, "status": "success"}

    except Exception as e:
        # BUG FIX: the failure path previously returned a one-element *list*
        # of dicts while the success path returned a bare dict, so callers
        # doing result["response"] raised TypeError on failure. Both paths
        # now return the same dict shape. (Also corrected the copy-pasted
        # "Gemini chunking" log message.)
        print(f"Error in llm_answer: {e}")
        return {"response": "", "status": "fail"}
| |
|
| |
|
@app.post("/get-response")
def get_response(request: QueryRequest):
    """
    Run the full RAG pipeline for a query: retrieve context from the vector
    store, ask the crew for an answer, and return the parsed result.

    Returns:
        dict with "response", "context", and "citations" keys taken from the
        crew's JSON output.

    Raises:
        HTTPException: 422 on validation errors, 400 on value errors
            (including malformed JSON from the crew — JSONDecodeError is a
            ValueError subclass), 500 on any other failure.
    """
    try:
        # Retrieve supporting context for the query.
        rag = AgenticRAG(query_value=request.query, is_uploaded=request.is_uploaded)
        context = rag.query(query_text=request.query, n_results=15)

        print("Generate answer from gemini")  # fixed typo: was "form gemini"

        response = get_crew_response(
            query=request.query, context=context, url=request.url
        )
        print(response)

        # Strip control characters (keeping newline/CR/tab) so stray bytes
        # in the model output do not break json.loads.
        cleaned_text = "".join(
            char for char in response if ord(char) >= 32 or char in "\n\r\t"
        )
        result = json.loads(cleaned_text)
        print(result)

        # A missing "Answer"/"context"/"citations" key raises KeyError,
        # which surfaces as a 500 below.
        return {
            "response": result["Answer"],
            "context": result["context"],
            "citations": result["citations"],
        }

    except ValidationError as e:
        raise HTTPException(status_code=422, detail=f"Validation Error: {e}")

    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Value Error: {e}")

    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
| |
|
| |
|
@app.post("/llm-response")
def get_response_llm(request: dict):
    """
    Generate a direct LLM answer (no RAG retrieval) for request["query"].

    Returns:
        dict with a single "response" key containing the generated text.

    Raises:
        HTTPException: 400 if "query" is missing (previously this surfaced
            as a 500 via KeyError), 422 on validation errors, 500 otherwise.
    """
    try:
        if "query" not in request:
            # Report a malformed payload as a client error, not a 500.
            raise ValueError("missing required field 'query'")

        print("Generate answer from gemini")  # fixed typo: was "form gemini"

        result = llm_answer(query=request["query"])

        return {"response": result["response"]}

    except ValidationError as e:
        raise HTTPException(status_code=422, detail=f"Validation Error: {e}")

    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Value Error: {e}")

    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
| |
|
| |
|
@app.get("/health")
def health_check():
    """Liveness probe — always reports the service as healthy."""
    return dict(status="ok")
| |
|
| |
|
@app.post("/delete-file")
async def delete_file_data(request: dict):
    """
    Delete all vector-store documents associated with request["file_path"].

    BUG FIX: this handler previously shared the name `process_upload_data`
    with the /process-file handler defined later in the file, so the second
    definition shadowed this one at module level (the route itself still
    worked because FastAPI registers handlers at decoration time). It also
    carried a copy-pasted docstring describing embedding, not deletion.

    Raises:
        HTTPException: 400 if "file_path" is missing, 422 on validation
            errors, 500 on any other failure.
    """
    try:
        if "file_path" not in request:
            # Report a malformed payload as a client error, not a 500.
            raise ValueError("missing required field 'file_path'")

        db = VectorStore()

        print("deletion started.")
        db.delete_documents_by_filename(request["file_path"])
        print("deletion end.")

        return {"response": 200}

    except ValidationError as e:
        raise HTTPException(status_code=422, detail=f"Validation Error: {e}")

    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Value Error: {e}")

    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
| |
|
| |
|
@app.post("/process-file")
async def process_upload_data(request: dict):
    """
    Embed the file at request["file_path"] and store the result in the
    vector database. (Fixed docstring typo: "emedding" -> "embedding".)

    Raises:
        HTTPException: 400 if "file_path" is missing (previously a KeyError
            surfaced as a 500), 422 on validation errors, 500 otherwise.
    """
    try:
        if "file_path" not in request:
            # Report a malformed payload as a client error, not a 500.
            raise ValueError("missing required field 'file_path'")

        rag = AgenticRAG(is_uploaded=False)

        print("process started.")
        rag.process_file(request["file_path"])
        print("process end.")

        return {"response": 200}

    except ValidationError as e:
        raise HTTPException(status_code=422, detail=f"Validation Error: {e}")

    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Value Error: {e}")

    except Exception as e:
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
| |
|
| |
|
# Script entry point: serve the API directly with uvicorn when this module
# is executed (rather than imported by an ASGI server).
if __name__ == "__main__":
    import uvicorn

    # Bind to all interfaces on port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
| |
|