Spaces:
No application file
No application file
Commit Β·
7054249
1
Parent(s): 39f360e
Add application file
Browse files- DockerFile +18 -0
- app/__pycache__/config.cpython-311.pyc +0 -0
- app/__pycache__/config.cpython-313.pyc +0 -0
- app/__pycache__/database.cpython-311.pyc +0 -0
- app/__pycache__/database.cpython-313.pyc +0 -0
- app/__pycache__/main.cpython-311.pyc +0 -0
- app/__pycache__/main.cpython-313.pyc +0 -0
- app/__pycache__/qdrant_client.cpython-311.pyc +0 -0
- app/__pycache__/qdrant_client.cpython-313.pyc +0 -0
- app/config.py +20 -0
- app/database.py +30 -0
- app/main.py +43 -0
- app/models/__pycache__/chat.cpython-311.pyc +0 -0
- app/models/__pycache__/translation.cpython-313.pyc +0 -0
- app/models/__pycache__/user.cpython-311.pyc +0 -0
- app/models/__pycache__/user.cpython-313.pyc +0 -0
- app/models/chat.py +14 -0
- app/models/user.py +9 -0
- app/qdrant_client.py +40 -0
- app/routes/__pycache__/auth.cpython-313.pyc +0 -0
- app/routes/__pycache__/chat.cpython-311.pyc +0 -0
- app/routes/__pycache__/chat.cpython-313.pyc +0 -0
- app/routes/__pycache__/personalize.cpython-313.pyc +0 -0
- app/routes/__pycache__/translate.cpython-313.pyc +0 -0
- app/routes/chat.py +60 -0
- app/schemas/__pycache__/auth.cpython-313.pyc +0 -0
- app/schemas/__pycache__/chat.cpython-311.pyc +0 -0
- app/schemas/__pycache__/chat.cpython-313.pyc +0 -0
- app/schemas/__pycache__/personalize.cpython-313.pyc +0 -0
- app/schemas/__pycache__/translate.cpython-313.pyc +0 -0
- app/schemas/chat.py +23 -0
- app/services/__pycache__/auth.cpython-313.pyc +0 -0
- app/services/__pycache__/embeddings_service.cpython-311.pyc +0 -0
- app/services/__pycache__/embeddings_service.cpython-313.pyc +0 -0
- app/services/__pycache__/gemini_service.cpython-313.pyc +0 -0
- app/services/__pycache__/openai_service.cpython-311.pyc +0 -0
- app/services/__pycache__/openai_service.cpython-313.pyc +0 -0
- app/services/__pycache__/rag_service.cpython-311.pyc +0 -0
- app/services/__pycache__/rag_service.cpython-313.pyc +0 -0
- app/services/embeddings_service.py +21 -0
- app/services/openai_service.py +26 -0
- app/services/rag_service.py +40 -0
- chat_database.db +0 -0
- requirements.txt +12 -0
- run.bat +15 -0
- scripts/ingest_content.py +104 -0
- setup.bat +29 -0
- test_request.json +4 -0
- test_request2.json +4 -0
- test_setup.py +132 -0
DockerFile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Slim Python base keeps the image small
FROM python:3.11-slim

# All subsequent commands run from /app
WORKDIR /app

# Install Python dependencies first so this layer is cached
# when only application code changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Bring in the application source
COPY . .

# Hugging Face Spaces routes traffic to port 7860
EXPOSE 7860

# Serve the FastAPI app with uvicorn
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
app/__pycache__/config.cpython-311.pyc
ADDED
|
Binary file (1.4 kB). View file
|
|
|
app/__pycache__/config.cpython-313.pyc
ADDED
|
Binary file (1.46 kB). View file
|
|
|
app/__pycache__/database.cpython-311.pyc
ADDED
|
Binary file (1.07 kB). View file
|
|
|
app/__pycache__/database.cpython-313.pyc
ADDED
|
Binary file (1.43 kB). View file
|
|
|
app/__pycache__/main.cpython-311.pyc
ADDED
|
Binary file (1.8 kB). View file
|
|
|
app/__pycache__/main.cpython-313.pyc
ADDED
|
Binary file (2.12 kB). View file
|
|
|
app/__pycache__/qdrant_client.cpython-311.pyc
ADDED
|
Binary file (2.11 kB). View file
|
|
|
app/__pycache__/qdrant_client.cpython-313.pyc
ADDED
|
Binary file (1.96 kB). View file
|
|
|
app/config.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from typing import Optional

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    """Application settings, read from the environment and an optional .env file."""

    # Credentials / endpoints; process environment wins at import time.
    OPENAI_API_KEY: Optional[str] = os.getenv("OPENAI_API_KEY", "")
    DATABASE_URL: str = os.getenv("DATABASE_URL", "")
    NEON_DATABASE_URL: str = os.getenv("NEON_DATABASE_URL", "")
    QDRANT_URL: str = os.getenv("QDRANT_URL", "http://localhost:6333")
    QDRANT_API_KEY: str = os.getenv("QDRANT_API_KEY", "")

    # Fixed model identifiers used across the services.
    OPENAI_MODEL_CHAT: str = "gpt-4o-mini"
    OPENAI_MODEL_EMBEDDING: str = "text-embedding-3-small"

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
        extra = "ignore"  # tolerate unrelated keys in .env


# Shared singleton imported by the rest of the app.
settings = Settings()
app/database.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from app.config import settings

# Prefer the Neon connection string, then the generic DATABASE_URL.
SQLALCHEMY_DATABASE_URL = settings.NEON_DATABASE_URL or settings.DATABASE_URL

# Treat empty values and obvious template placeholders as "not configured"
# and fall back to a local SQLite file so the app can still start.
_url_is_unusable = (
    not SQLALCHEMY_DATABASE_URL
    or not SQLALCHEMY_DATABASE_URL.strip()
    or "your_" in SQLALCHEMY_DATABASE_URL
    or SQLALCHEMY_DATABASE_URL.startswith("postgresql://your")
    or SQLALCHEMY_DATABASE_URL.startswith("postgres://your")
)
if _url_is_unusable:
    SQLALCHEMY_DATABASE_URL = "sqlite:///./chat_database.db"
    print("⚠️ No database URL configured, using SQLite as fallback")

# SQLite connections need check_same_thread=False to be shared across
# FastAPI's worker threads; other backends take no extra connect args.
engine = create_engine(
    SQLALCHEMY_DATABASE_URL,
    connect_args={"check_same_thread": False} if "sqlite" in SQLALCHEMY_DATABASE_URL else {}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()


def get_db():
    """FastAPI dependency: yield a session and always close it afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
app/main.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.routes import chat
from app.database import engine, Base
from app.qdrant_client import init_qdrant_collection

app = FastAPI(title="RAG Chatbot API")

# CORS: fully open so any frontend origin can reach the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the chat endpoints under their /api prefix.
app.include_router(chat.router)


@app.on_event("startup")
async def startup_event():
    """Best-effort initialization: DB tables first, then the Qdrant collection.

    Failures are logged but never abort startup, so the API stays reachable
    even when a backing service is down.
    """
    try:
        Base.metadata.create_all(bind=engine)
        print("✅ Database tables created successfully")
    except Exception as e:
        print(f"⚠️ Warning: Database initialization failed: {e}")

    try:
        init_qdrant_collection()
    except Exception as e:
        print(f"⚠️ Warning: Qdrant initialization failed: {e}")
        print("   The API will still work but RAG features may be limited")


@app.get("/")
async def root():
    """Simple landing payload identifying the service."""
    return {"message": "RAG Chatbot API"}


@app.get("/api/health")
async def health():
    """Liveness probe endpoint."""
    return {"status": "ok"}
app/models/__pycache__/chat.cpython-311.pyc
ADDED
|
Binary file (1.17 kB). View file
|
|
|
app/models/__pycache__/translation.cpython-313.pyc
ADDED
|
Binary file (1.01 kB). View file
|
|
|
app/models/__pycache__/user.cpython-311.pyc
ADDED
|
Binary file (794 Bytes). View file
|
|
|
app/models/__pycache__/user.cpython-313.pyc
ADDED
|
Binary file (1.91 kB). View file
|
|
|
app/models/chat.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime, func
from sqlalchemy.orm import relationship
from app.database import Base


class ChatHistory(Base):
    """One question/answer exchange, optionally linked to a user."""

    __tablename__ = "chat_history"

    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    message = Column(String)    # the user's question
    response = Column(String)   # the generated answer
    timestamp = Column(DateTime, default=func.now())  # DB-side creation time

    user = relationship("User")
app/models/user.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from sqlalchemy import Column, Integer, String
from app.database import Base


class User(Base):
    """Minimal user record; referenced by ChatHistory.user_id."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    username = Column(String, unique=True, index=True)
    email = Column(String, unique=True, index=True)
app/qdrant_client.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from app.config import settings

# Build the client kwargs; the api_key is omitted entirely when unset so
# the SDK does not log a spurious warning.
qdrant_kwargs = {"url": settings.QDRANT_URL}
if settings.QDRANT_API_KEY and settings.QDRANT_API_KEY.strip():
    qdrant_kwargs["api_key"] = settings.QDRANT_API_KEY

qdrant_client = QdrantClient(**qdrant_kwargs)

COLLECTION_NAME = "book_embeddings"


def init_qdrant_collection():
    """Create the embeddings collection if it does not already exist.

    Any connectivity/creation error is reported as a warning instead of
    raising, so callers can treat Qdrant as optional at startup.
    """
    try:
        existing = {col.name for col in qdrant_client.get_collections().collections}
        if COLLECTION_NAME in existing:
            print(f"✅ Qdrant collection already exists: {COLLECTION_NAME}")
        else:
            qdrant_client.create_collection(
                collection_name=COLLECTION_NAME,
                vectors_config=VectorParams(
                    size=1536,  # OpenAI text-embedding-3-small dimension
                    distance=Distance.COSINE,
                ),
            )
            print(f"✅ Created Qdrant collection: {COLLECTION_NAME}")
    except Exception as e:
        print(f"⚠️ Warning: Could not initialize Qdrant collection: {e}")


def get_qdrant_client():
    """FastAPI dependency returning the module-level Qdrant client."""
    return qdrant_client
|
app/routes/__pycache__/auth.cpython-313.pyc
ADDED
|
Binary file (4.26 kB). View file
|
|
|
app/routes/__pycache__/chat.cpython-311.pyc
ADDED
|
Binary file (3.74 kB). View file
|
|
|
app/routes/__pycache__/chat.cpython-313.pyc
ADDED
|
Binary file (3.2 kB). View file
|
|
|
app/routes/__pycache__/personalize.cpython-313.pyc
ADDED
|
Binary file (2.9 kB). View file
|
|
|
app/routes/__pycache__/translate.cpython-313.pyc
ADDED
|
Binary file (2.96 kB). View file
|
|
|
app/routes/chat.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import logging

from fastapi import APIRouter, Depends, HTTPException
from qdrant_client import QdrantClient

from app.qdrant_client import get_qdrant_client
from app.schemas.chat import ChatRequest, ChatResponse, ChatSelectionRequest
from app.services.rag_service import RAGService
from app.services.embeddings_service import EmbeddingsService
from app.services.openai_service import OpenAIService

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api", tags=["chat"])


def get_rag_service(
    qdrant_client: QdrantClient = Depends(get_qdrant_client)
):
    """Assemble a RAGService with fresh embedding/chat clients for the request."""
    return RAGService(qdrant_client, EmbeddingsService(), OpenAIService())


@router.post("/chat", response_model=ChatResponse)
async def chat(
    request: ChatRequest,
    rag_service: RAGService = Depends(get_rag_service)
):
    """Answer a question using the top-3 retrieved context chunks."""
    try:
        context = await rag_service.retrieve_context(request.question, top_k=3)
        answer = await rag_service.generate_response(request.question, context)
        # Generic labels, one per retrieved chunk.
        sources = [f"Source {i+1}" for i in range(len(context))]
        return ChatResponse(answer=answer, sources=sources)
    except Exception as e:
        logger.error(f"Error in chat endpoint: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/chat-selection", response_model=ChatResponse)
async def chat_selection(
    request: ChatSelectionRequest,
    rag_service: RAGService = Depends(get_rag_service)
):
    """Answer a question using only the user's selected text as context."""
    try:
        context = [request.selected_text]
        answer = await rag_service.generate_response(request.question, context)
        return ChatResponse(answer=answer, sources=["Selected Text"])
    except Exception as e:
        logger.error(f"Error in chat_selection endpoint: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
app/schemas/__pycache__/auth.cpython-313.pyc
ADDED
|
Binary file (3.04 kB). View file
|
|
|
app/schemas/__pycache__/chat.cpython-311.pyc
ADDED
|
Binary file (1.84 kB). View file
|
|
|
app/schemas/__pycache__/chat.cpython-313.pyc
ADDED
|
Binary file (1.59 kB). View file
|
|
|
app/schemas/__pycache__/personalize.cpython-313.pyc
ADDED
|
Binary file (1.65 kB). View file
|
|
|
app/schemas/__pycache__/translate.cpython-313.pyc
ADDED
|
Binary file (1.58 kB). View file
|
|
|
app/schemas/chat.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from pydantic import BaseModel
from typing import List, Optional
from datetime import datetime


class Message(BaseModel):
    """A single chat message with its conversational role."""
    content: str
    role: str


class ChatRequest(BaseModel):
    """Payload for /api/chat: a question, optionally tied to a user."""
    question: str
    user_id: Optional[int] = None


class ChatResponse(BaseModel):
    """Answer plus the labels of the context chunks that informed it."""
    answer: str
    sources: List[str] = []


class ChatSelectionRequest(BaseModel):
    """Payload for /api/chat-selection: question about a selected passage."""
    question: str
    selected_text: str
    user_id: Optional[int] = None


class ChatSelectionResponse(BaseModel):
    """Bare answer for a selection-based query."""
    response: str
|
app/services/__pycache__/auth.cpython-313.pyc
ADDED
|
Binary file (3.72 kB). View file
|
|
|
app/services/__pycache__/embeddings_service.cpython-311.pyc
ADDED
|
Binary file (1.57 kB). View file
|
|
|
app/services/__pycache__/embeddings_service.cpython-313.pyc
ADDED
|
Binary file (1.54 kB). View file
|
|
|
app/services/__pycache__/gemini_service.cpython-313.pyc
ADDED
|
Binary file (5.49 kB). View file
|
|
|
app/services/__pycache__/openai_service.cpython-311.pyc
ADDED
|
Binary file (1.84 kB). View file
|
|
|
app/services/__pycache__/openai_service.cpython-313.pyc
ADDED
|
Binary file (1.81 kB). View file
|
|
|
app/services/__pycache__/rag_service.cpython-311.pyc
ADDED
|
Binary file (2.75 kB). View file
|
|
|
app/services/__pycache__/rag_service.cpython-313.pyc
ADDED
|
Binary file (2.7 kB). View file
|
|
|
app/services/embeddings_service.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from openai import OpenAI
from app.config import settings
import httpx
import asyncio


class EmbeddingsService:
    """Async wrapper around the OpenAI embeddings endpoint."""

    def __init__(self):
        # An explicit httpx client avoids passing unsupported kwargs to OpenAI().
        # NOTE(review): this client is never closed; confirm instances are
        # short-lived or add explicit cleanup.
        self.client = OpenAI(api_key=settings.OPENAI_API_KEY, http_client=httpx.Client())
        self.model = "text-embedding-3-small"

    async def create_embedding(self, text: str):
        """Return the embedding vector for *text* (newlines collapsed to spaces)."""
        cleaned = text.replace("\n", " ")
        # The OpenAI SDK call blocks; run it off the event loop.
        response = await asyncio.to_thread(
            self.client.embeddings.create,
            input=[cleaned],
            model=self.model,
        )
        return response.data[0].embedding
|
app/services/openai_service.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from openai import OpenAI
from app.config import settings
from typing import List
import httpx
import asyncio


class OpenAIService:
    """Async wrapper around the OpenAI chat-completions endpoint."""

    def __init__(self):
        # An explicit httpx client avoids passing unsupported kwargs to OpenAI().
        self.client = OpenAI(api_key=settings.OPENAI_API_KEY, http_client=httpx.Client())
        self.model = "gpt-4o-mini"

    async def get_chat_response(self, prompt: str, history: List[dict] = None) -> str:
        """Send *prompt* (appended after optional *history*) and return the reply text.

        Args:
            prompt: The user's message.
            history: Optional prior messages in OpenAI role/content format.
        """
        messages = list(history) if history else []
        messages.append({"role": "user", "content": prompt})

        # The OpenAI SDK call blocks; run it off the event loop.
        response = await asyncio.to_thread(
            self.client.chat.completions.create,
            model=self.model,
            messages=messages,
        )
        return response.choices[0].message.content
|
app/services/rag_service.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import asyncio
from qdrant_client import QdrantClient
from typing import List

from app.services.openai_service import OpenAIService
from app.services.embeddings_service import EmbeddingsService
# FIX: removed unused `from qdrant_client.models import NamedVector` import.


class RAGService:
    """Retrieval-augmented generation: Qdrant vector search + OpenAI chat."""

    def __init__(self, qdrant_client: QdrantClient, embeddings_service: EmbeddingsService, openai_service: OpenAIService):
        self.qdrant_client = qdrant_client
        self.embeddings_service = embeddings_service
        self.openai_service = openai_service
        # Collection name is overridable via the environment.
        self.collection_name = os.getenv("QDRANT_COLLECTION_NAME", "book_embeddings")

    async def retrieve_context(self, query: str, top_k: int = 3) -> List[str]:
        """Embed *query* and return the `content` payload of the top_k nearest points."""
        query_vector = await self.embeddings_service.create_embedding(query)

        # The Qdrant client is synchronous; run the search off the event loop.
        search_result = await asyncio.to_thread(
            self.qdrant_client.query_points,
            collection_name=self.collection_name,
            query=query_vector,
            limit=top_k,
            with_payload=True
        )

        return [point.payload.get("content", "") for point in search_result.points if point.payload]

    async def generate_response(self, query: str, context: List[str]) -> str:
        """Build a context-grounded prompt for *query* and return the model's answer."""
        full_prompt = f"""Context: {' '.join(context)}

Question: {query}

Answer:"""
        return await self.openai_service.get_chat_response(full_prompt)
|
chat_database.db
ADDED
|
File without changes
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.111.0
|
| 2 |
+
uvicorn==0.30.1
|
| 3 |
+
openai==1.35.13
|
| 4 |
+
qdrant-client==1.9.0
|
| 5 |
+
psycopg2-binary==2.9.9
|
| 6 |
+
sqlalchemy==2.0.31
|
| 7 |
+
python-dotenv==1.0.1
|
| 8 |
+
pydantic==2.8.2
|
| 9 |
+
pydantic-settings==2.3.4
|
| 10 |
+
asyncpg==0.29.0
|
| 11 |
+
markdown==3.6
|
| 12 |
+
beautifulsoup4==4.12.3
|
run.bat
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@echo off
setlocal

REM Work from the backend project folder
cd backend

REM Enable the project's virtual environment
echo Activating virtual environment...
call venv\Scripts\activate

REM Launch the API with auto-reload for development
echo Starting FastAPI application with uvicorn...
uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload

endlocal
|
scripts/ingest_content.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
import markdown
|
| 5 |
+
from bs4 import BeautifulSoup
|
| 6 |
+
from qdrant_client import QdrantClient
|
| 7 |
+
from qdrant_client.models import Distance, VectorParams, PointStruct
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
|
| 10 |
+
# Add these to enable relative imports
|
| 11 |
+
import sys
|
| 12 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 13 |
+
|
| 14 |
+
from app.services.embeddings_service import EmbeddingsService
|
| 15 |
+
from app.qdrant_client import get_qdrant_client
|
| 16 |
+
|
| 17 |
+
load_dotenv(dotenv_path=Path(__file__).resolve().parent.parent / ".env")
|
| 18 |
+
|
| 19 |
+
QDRANT_COLLECTION_NAME = os.getenv("QDRANT_COLLECTION_NAME", "docs_collection")
|
| 20 |
+
|
| 21 |
+
def load_mdx_content(filepath: Path) -> str:
    """Read an MDX file and return its plain-text content.

    MDX is Markdown-compatible, so the file is rendered to HTML first and
    the text is then extracted with BeautifulSoup.
    """
    with open(filepath, 'r', encoding='utf-8') as fh:
        raw = fh.read()
    rendered = markdown.markdown(raw)
    return BeautifulSoup(rendered, 'html.parser').get_text()
|
| 28 |
+
|
| 29 |
+
def chunk_text(text: str, chunk_size: int = 1000, overlap: int = 200) -> list[str]:
    """Split *text* into chunks of at most ``chunk_size`` characters.

    Consecutive chunks share ``overlap`` characters so context is preserved
    across chunk boundaries. An empty *text* yields an empty list.

    Args:
        text: The string to split.
        chunk_size: Maximum length of each chunk; must be positive.
        overlap: Characters shared between adjacent chunks; must be < chunk_size.

    Raises:
        ValueError: if the parameters would make the window never advance
            (previously ``overlap >= chunk_size`` either raised an opaque
            ``range()`` error or silently returned no chunks at all).
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    if overlap >= chunk_size:
        raise ValueError("overlap must be smaller than chunk_size")
    step = chunk_size - overlap
    return [text[i:i + chunk_size] for i in range(0, len(text), step)]
|
| 34 |
+
|
| 35 |
+
async def ingest_content(
    docs_path: Path,
    qdrant_client: QdrantClient,
    embeddings_service: EmbeddingsService,
    collection_name: str,
):
    """Embed every ``*.mdx`` file under *docs_path* and upsert it into Qdrant.

    The collection is recreated from scratch, each document is chunked,
    each chunk embedded, and points are upserted in batches of 100.
    """
    qdrant_client.recreate_collection(
        collection_name=collection_name,
        vectors_config=VectorParams(size=1536, distance=Distance.COSINE),  # OpenAI embeddings size
    )

    points = []
    point_id = 0
    for mdx_file in docs_path.rglob("*.mdx"):
        print(f"Processing {mdx_file}")
        content = load_mdx_content(mdx_file)
        chunks = chunk_text(content)

        for chunk in chunks:
            # BUG FIX: create_embedding is a coroutine and must be awaited;
            # without `await` the raw coroutine object (not a vector) was
            # stored in every PointStruct.
            embedding = await embeddings_service.create_embedding(chunk)
            points.append(
                PointStruct(
                    id=point_id,
                    vector=embedding,
                    payload={
                        "content": chunk,
                        "source": str(mdx_file.relative_to(docs_path))
                    }
                )
            )
            point_id += 1

            if len(points) >= 100:  # Batch upsert
                qdrant_client.upsert(
                    collection_name=collection_name,
                    points=points,
                    wait=True,
                )
                points = []

    if points:  # Upsert remaining points
        qdrant_client.upsert(
            collection_name=collection_name,
            points=points,
            wait=True,
        )

    print(f"Ingestion complete. Total points: {point_id}")
|
| 83 |
+
|
| 84 |
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Ingest MDX content into Qdrant.")
    parser.add_argument(
        "--docs_path",
        type=str,
        default="../physical-ai-humanoid-robotics/docs/",
        help="Path to the directory containing MDX documentation files."
    )
    args = parser.parse_args()

    client = get_qdrant_client()
    embedder = EmbeddingsService()

    # Drive the async ingestion pipeline to completion.
    import asyncio
    asyncio.run(ingest_content(
        docs_path=Path(args.docs_path),
        qdrant_client=client,
        embeddings_service=embedder,
        collection_name=QDRANT_COLLECTION_NAME
    ))
|
setup.bat
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@echo off
setlocal

REM Work from the backend project folder
cd backend

REM Create the virtual environment on first run
if not exist venv (
    echo Creating virtual environment...
    python -m venv venv
)

REM Enable the virtual environment
echo Activating virtual environment...
call venv\Scripts\activate

REM Install project dependencies
echo Installing dependencies from requirements.txt...
pip install -r requirements.txt

REM Seed a .env from the template when none exists
if not exist .env (
    echo Creating .env from .env.example...
    copy .env.example .env
)

echo Setup complete.
endlocal
pause
|
test_request.json
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"question": "What is RAG?",
|
| 3 |
+
"user_id": 1
|
| 4 |
+
}
|
test_request2.json
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"question": "Explain ROS2 in simple terms",
|
| 3 |
+
"user_id": 1
|
| 4 |
+
}
|
test_setup.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Quick setup verification script
|
| 3 |
+
Run this to check if your environment is configured correctly
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import os
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
def check_env_file():
    """Verify a .env file exists beside this script and defines the required variables.

    Returns True when the file exists and no variable is missing or a
    placeholder; prints diagnostics and returns False otherwise.
    """
    env_path = Path(__file__).parent / ".env"

    if not env_path.exists():
        print("❌ .env file not found!")
        print("   Create it by copying .env.example and adding your credentials")
        return False

    print("✅ .env file exists")

    content = env_path.read_text()

    required_vars = {
        'OPENAI_API_KEY': 'OpenAI API key',
    }

    missing = []
    for var, description in required_vars.items():
        if var not in content:
            missing.append(f"   - {var} ({description})")
            continue
        # Heuristic placeholder detection: template value, or a very short
        # (<10 chars) value after "VAR=".
        looks_like_placeholder = f"{var}=your_" in content or (
            f"{var}=" in content
            and len(content.split(f"{var}=")[1].split('\n')[0].strip()) < 10
        )
        if looks_like_placeholder:
            print(f"⚠️  {var} appears to be a placeholder - please set your actual {description}")
            missing.append(f"   - {var} needs to be set")

    if missing:
        print("❌ Missing or invalid environment variables:")
        for item in missing:
            print(item)
        return False

    print("✅ Required environment variables are set")
    return True
| 45 |
+
|
| 46 |
+
def check_dependencies():
    """Report whether every required package is importable; True if all are."""
    required_packages = [
        'fastapi',
        'uvicorn',
        'openai',
        'qdrant_client',
        'sqlalchemy',
        'pydantic',
        'pydantic_settings'
    ]

    missing = []
    for package in required_packages:
        try:
            __import__(package.replace('-', '_'))
        except ImportError:
            missing.append(package)
            print(f"❌ {package} not installed")
        else:
            print(f"✅ {package} installed")

    if missing:
        print("\n❌ Missing packages. Install them with:")
        print("   pip install -r requirements.txt")
        return False

    return True
|
| 73 |
+
|
| 74 |
+
def check_openai_key():
    """Check that OPENAI_API_KEY is configured and not a template placeholder.

    Returns True when a plausible key is set; False on a missing/placeholder
    key or when the app config cannot even be loaded.
    """
    try:
        from app.config import settings

        key = settings.OPENAI_API_KEY
        if not key or key.startswith('your_'):
            print("❌ OPENAI_API_KEY is not set or is a placeholder")
            print("   Please set your actual OpenAI API key in .env file")
            return False

        print("✅ OPENAI_API_KEY is configured")
        print(f"   Key starts with: {key[:10]}...")
        return True

    except Exception as e:
        print(f"❌ Error loading config: {e}")
        return False
|
| 91 |
+
|
| 92 |
+
def main():
    """Run every setup check in order and print a summary verdict."""
    print("=" * 60)
    print("Backend Setup Verification")
    print("=" * 60)
    print()

    checks = [
        ("Environment File", check_env_file),
        ("Dependencies", check_dependencies),
        ("OpenAI Configuration", check_openai_key),
    ]

    results = []
    for name, check_func in checks:
        print(f"\n🔍 Checking: {name}")
        print("-" * 60)
        try:
            results.append(check_func())
        except Exception as e:
            # A crashing check counts as a failure rather than aborting the run.
            print(f"❌ Error during check: {e}")
            results.append(False)

    print("\n" + "=" * 60)
    if all(results):
        print("✅ All checks passed! You're ready to run the backend.")
        print("\nStart the server with:")
        print("  .\\run.bat")
        print("\nOr manually:")
        print("  .\\.venv\\Scripts\\activate")
        print("  uvicorn app.main:app --reload --host 0.0.0.0 --port 8000")
    else:
        print("❌ Some checks failed. Please fix the issues above.")
        print("\nQuick fixes:")
        print("1. Make sure .env file exists with your OpenAI API key")
        print("2. Install dependencies: pip install -r requirements.txt")
        print("3. Check SETUP_GUIDE.md for detailed instructions")
    print("=" * 60)


if __name__ == "__main__":
    main()
|