# gitgud-ai — app/main.py
# Source: Hugging Face Space "gitgud-ai", uploaded by CodeCommunity
# (commit f5f0249, verified; file size 4.2 kB)
# main.py - Final Fixed Version
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional
import re
import logging
from app.services.reviewer_service import AIReviewerService
from app.predictor import classifier, guide_generator
# 1. Setup Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# 2. Initialize FastAPI and Services
app = FastAPI(title="GitGud AI Service")
reviewer_service = AIReviewerService()
# 3. Data Models (Order matters: ReviewRequest needs FileRequest)
class FileRequest(BaseModel):
fileName: str
content: Optional[str] = None
class ReviewRequest(BaseModel):
files: List[FileRequest]
class GuideRequest(BaseModel):
repoName: str
filePaths: List[str]
# 4. Endpoints
@app.get("/")
def health_check():
"""Checks server status and GPU availability."""
return {
"status": "online",
"model": "microsoft/codebert-base",
"device": classifier.device,
}
@app.post("/classify")
async def classify_file(request: FileRequest):
"""Classifies file into architectural layers."""
try:
result = classifier.predict(request.fileName, request.content)
return {
"fileName": request.fileName,
"layer": result["label"],
"confidence": result["confidence"],
"embedding": result["embedding"]
}
except Exception as e:
logger.error(f"Classify failed: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/generate-guide")
async def generate_guide(request: GuideRequest):
"""Generates markdown guides for repositories."""
try:
markdown = guide_generator.generate_markdown(request.repoName, request.filePaths)
return {"markdown": markdown}
except Exception as e:
logger.error(f"Guide generation failed: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/review")
async def review_code(request: ReviewRequest):
"""Detects security and logic issues in batches of files."""
try:
# Call the batch review logic from your service
results = reviewer_service.review_batch_code(request.files)
return {"reviews": results}
except Exception as e:
logger.error(f"Review endpoint failed: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/repo-dashboard-stats")
async def get_dashboard_stats(request: ReviewRequest):
try:
raw_reviews = reviewer_service.review_batch_code(request.files)
# 1. Security Count
total_vulns = sum(len(r.get("vulnerabilities", [])) for r in raw_reviews)
# 2. Performance Ratio (Maintainability)
# We use a default of 8 if the AI misses a file to avoid 0% scores
scores = [r.get("metrics", {}).get("maintainability", 8) for r in raw_reviews]
avg_maintainability = (sum(scores) / len(scores)) * 10 if scores else 0
# 3. API Sniffing
found_apis = []
for f in request.files:
if f.content:
# Regex looks for common route decorators or methods
matches = re.findall(r'(?:get|post|put|delete|patch)\([\'"]\/(.*?)[\'"]', f.content.lower())
for match in matches:
found_apis.append(f"/{match}")
# 4. Repo Health Calculation
# Every security issue drops health by 10 points
health_score = max(10, 100 - (total_vulns * 10))
return {
"repo_health": health_score,
"health_label": "Excellent Health" if health_score > 80 else "Needs Review",
"security_issues": total_vulns,
"performance_ratio": f"{int(avg_maintainability)}%",
"exposed_apis": list(set(found_apis))[:10]
}
except Exception as e:
logger.error(f"Dashboard stats failed: {e}")
raise HTTPException(status_code=500, detail="Failed to aggregate repository stats")
# 5. Application Entry Point
if __name__ == "__main__":
import uvicorn
# Port 7860 is mandatory for Hugging Face Spaces
uvicorn.run(app, host="0.0.0.0", port=7860)