Spaces:
Running
Running
# main.py
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
import uvicorn
import torch

from models import AnalysisRequest, AnalysisResponse
import logic

app = FastAPI(title="SEO AI Editor MVP")

# Hook up the directory with Jinja2 HTML templates.
# NOTE(review): StaticFiles is imported but never mounted — if the app serves
# static assets, an `app.mount("/static", StaticFiles(...))` call is missing; confirm.
templates = Jinja2Templates(directory="templates")
# NOTE(review): the decorator was missing in the pasted source; without it this
# hook never runs. Restored per the function name and the "preload" comment.
@app.on_event("startup")
async def startup_event():
    """Report the ML device and preload models when the server starts."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"🚀 Application starting. ML Device: {device}")
    logic.load_models()  # spaCy preload (optional)
# --- Route for the main page ---
# NOTE(review): the route decorator was missing in the pasted source; the
# comment "route for the main page" implies GET "/" — confirm the exact path.
@app.get("/", response_class=HTMLResponse)
async def read_root(request: Request):
    """Render and return the index.html template for the landing page."""
    return templates.TemplateResponse("index.html", {"request": request})
# NOTE(review): the POST decorator was missing in the pasted source; the path
# "/analyze" is assumed — confirm against the frontend's fetch() call.
@app.post("/analyze", response_model=AnalysisResponse)
async def analyze_text(request: AnalysisRequest):
    """Run the full SEO analysis pipeline on the submitted text.

    Computes n-gram statistics versus competitor texts, BM25-based keyword
    recommendations, and BERT semantic analysis, and returns them bundled
    in an AnalysisResponse.
    """
    # N-gram frequency statistics of the target text vs. competitors.
    ngram_stats_result = logic.calculate_ngram_stats(
        request.target_text,
        request.competitors,
        request.language,
    )
    # key_words_unigrams is currently unused — BM25 below deliberately takes the
    # raw keyword string instead (see comment), only key_phrases feed BERT.
    key_phrases, key_words_unigrams = logic.parse_keywords(request.keywords, request.language)
    bm25_recs = logic.calculate_bm25_recommendations(
        request.target_text,
        request.competitors,
        request.keywords,  # changed here: raw keywords (was key_words_unigrams)
        request.language,
    )
    bert_results = logic.perform_bert_analysis(
        request.target_text,
        request.competitors,  # competitors added as an argument
        key_phrases,
        request.language,
    )
    return AnalysisResponse(
        ngram_stats=ngram_stats_result,
        bm25_recommendations=bm25_recs,
        bert_analysis=bert_results,
    )
| if __name__ == "__main__": | |
| uvicorn.run("main:app", host="127.0.0.1", port=8001, reload=True) |