Spaces:
Sleeping
Sleeping
Rollback rerank and e5-large-v2 to all-MiniLM-L6-v2
Browse files
.env
CHANGED
|
@@ -12,10 +12,10 @@ HF_API_URL=https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Inst
|
|
| 12 |
HF_TOKEN=your_huggingface_token_here
|
| 13 |
|
| 14 |
# Embedding Model
|
| 15 |
-
|
| 16 |
#EMBEDDING_MODEL=sentence-transformers/multi-qa-MiniLM-L6-cos-v1
|
| 17 |
#EMBEDDING_MODEL=BAAI/bge-small-en-v1.5
|
| 18 |
-
EMBEDDING_MODEL=intfloat/e5-large-v2
|
| 19 |
|
| 20 |
|
| 21 |
# Server Configuration
|
|
@@ -24,4 +24,4 @@ PORT=7860
|
|
| 24 |
LOG_LEVEL=debug
|
| 25 |
|
| 26 |
# CORS
|
| 27 |
-
ALLOWED_ORIGINS=http://localhost:5173,https://your-frontend.pages.dev
|
|
|
|
| 12 |
HF_TOKEN=your_huggingface_token_here
|
| 13 |
|
| 14 |
# Embedding Model
|
| 15 |
+
EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
|
| 16 |
#EMBEDDING_MODEL=sentence-transformers/multi-qa-MiniLM-L6-cos-v1
|
| 17 |
#EMBEDDING_MODEL=BAAI/bge-small-en-v1.5
|
| 18 |
+
#EMBEDDING_MODEL=intfloat/e5-large-v2
|
| 19 |
|
| 20 |
|
| 21 |
# Server Configuration
|
|
|
|
| 24 |
LOG_LEVEL=debug
|
| 25 |
|
| 26 |
# CORS
|
| 27 |
+
ALLOWED_ORIGINS=http://localhost:5173,https://your-frontend.pages.dev,https://vcrlagent-workwise-backend.hf.space
|
app/routes/ask_routes.py
CHANGED
|
@@ -4,7 +4,7 @@ from fastapi import APIRouter, HTTPException
|
|
| 4 |
from app.models.jira_schema import QueryRequest, QueryResponse
|
| 5 |
from app.services.retriever import retriever
|
| 6 |
from app.services.generator import generator
|
| 7 |
-
from app.services.reranker import reranker
|
| 8 |
from app.utils.response_builder import build_query_response, extract_chart_intent
|
| 9 |
from app.utils.logger import setup_logger
|
| 10 |
from collections import Counter
|
|
@@ -33,15 +33,15 @@ async def ask_question(request: QueryRequest):
|
|
| 33 |
sources=[]
|
| 34 |
)
|
| 35 |
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
|
| 40 |
-
#
|
| 41 |
-
#
|
| 42 |
-
#
|
| 43 |
-
|
| 44 |
-
context = retriever.format_context(reranked_results)
|
| 45 |
|
| 46 |
# Generate answer
|
| 47 |
answer = generator.generate_rag_response(request.query, context)
|
|
|
|
| 4 |
from app.models.jira_schema import QueryRequest, QueryResponse
|
| 5 |
from app.services.retriever import retriever
|
| 6 |
from app.services.generator import generator
|
| 7 |
+
#from app.services.reranker import reranker
|
| 8 |
from app.utils.response_builder import build_query_response, extract_chart_intent
|
| 9 |
from app.utils.logger import setup_logger
|
| 10 |
from collections import Counter
|
|
|
|
| 33 |
sources=[]
|
| 34 |
)
|
| 35 |
|
| 36 |
+
|
| 37 |
+
# Format context
|
| 38 |
+
context = retriever.format_context(results)
|
| 39 |
|
| 40 |
+
# 🧠 Re-rank results
|
| 41 |
+
#logger.info("[RERANKER] Starting re-ranking process...")
|
| 42 |
+
#reranked_results = reranker.rerank(request.query, results, top_k=5)
|
| 43 |
+
# (disabled) would format context from reranked results instead of raw results
|
| 44 |
+
#context = retriever.format_context(reranked_results)  # NOTE: disabled — reranker path raised an IO error at startup; re-enable once fixed
|
| 45 |
|
| 46 |
# Generate answer
|
| 47 |
answer = generator.generate_rag_response(request.query, context)
|
app/routes/{debug_routes.py → debug_routes.py.bak}
RENAMED
|
File without changes
|
app/services/{reranker.py → reranker.py.bak}
RENAMED
|
File without changes
|
requirements.txt
CHANGED
|
@@ -8,7 +8,6 @@ pandas==2.2.0
|
|
| 8 |
numpy==1.26.3
|
| 9 |
requests==2.31.0
|
| 10 |
pydantic==2.5.3
|
| 11 |
-
python-multipart==0.0.6
|
| 12 |
tqdm==4.66.4 # progress bars
|
| 13 |
loguru==0.7.2 # clean logging
|
| 14 |
|
|
|
|
| 8 |
numpy==1.26.3
|
| 9 |
requests==2.31.0
|
| 10 |
pydantic==2.5.3
|
|
|
|
| 11 |
tqdm==4.66.4 # progress bars
|
| 12 |
loguru==0.7.2 # clean logging
|
| 13 |
|