Commit fd1c27c
Parent: 577bc50

embedding model Updated
Files changed:
- __pycache__/config.cpython-310.pyc +0 -0
- __pycache__/pinecone_utilsA.cpython-310.pyc +0 -0
- __pycache__/pinecone_utilsB.cpython-310.pyc +0 -0
- config.py +8 -0
- neo4j_utils.py +0 -10
- pinecone_utilsA.py +17 -4
- pinecone_utilsB.py +24 -12
__pycache__/config.cpython-310.pyc
CHANGED
Binary files a/__pycache__/config.cpython-310.pyc and b/__pycache__/config.cpython-310.pyc differ

__pycache__/pinecone_utilsA.cpython-310.pyc
CHANGED
Binary files a/__pycache__/pinecone_utilsA.cpython-310.pyc and b/__pycache__/pinecone_utilsA.cpython-310.pyc differ

__pycache__/pinecone_utilsB.cpython-310.pyc
CHANGED
Binary files a/__pycache__/pinecone_utilsB.cpython-310.pyc and b/__pycache__/pinecone_utilsB.cpython-310.pyc differ
config.py
CHANGED
@@ -5,6 +5,9 @@ from pinecone import Pinecone, ServerlessSpec, Index
 from langsmith import Client
 from langchain_mistralai.chat_models import ChatMistralAI
 from neo4j import GraphDatabase
+from sentence_transformers import SentenceTransformer
+from pinecone_text.sparse import BM25Encoder
+from langchain.embeddings import HuggingFaceEmbeddings
 
 # Charger les variables d'environnement
 load_dotenv()
@@ -50,6 +53,11 @@ llm = ChatMistralAI(
     verbose=True
 )
 
+# Initialiser les modèles et encodeurs
+model = SentenceTransformer("intfloat/multilingual-e5-large")
+sparse_encoder = BM25Encoder().default()
+embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
+
 # Vérifier si les index existent
 existing_indexes = pc.list_indexes()
 all_names = [idx["name"] for idx in existing_indexes]
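With the embedding model, BM25 encoder, and LangChain embeddings now created once in config.py, the other modules can import the shared objects instead of re-instantiating them at import time. A minimal usage sketch of that pattern (the query string is illustrative, and it assumes pinecone_text's BM25Encoder exposes encode_queries, which this diff does not show):

from config import model, sparse_encoder

query = "example query"                                # illustrative query, not from the repo
dense_vector = model.encode([query]).tolist()[0]       # dense embedding from the shared SentenceTransformer
sparse_vector = sparse_encoder.encode_queries(query)   # sparse BM25 representation of the same query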
neo4j_utils.py
CHANGED
@@ -1,6 +1,3 @@
-from sentence_transformers import SentenceTransformer
-from pinecone_text.sparse import BM25Encoder
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.schema import HumanMessage
 import json
 import streamlit as st
@@ -13,13 +10,6 @@ import logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-
-# Initialiser les modèles et encodeurs
-model = SentenceTransformer("intfloat/multilingual-e5-large")
-sparse_encoder = BM25Encoder().default()
-embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
-
-
 def extract_cypher_query(llm_output):
     """
     Extrait la requête Cypher valide de la sortie du LLM.
pinecone_utilsA.py
CHANGED
@@ -1,5 +1,6 @@
 from sentence_transformers import SentenceTransformer
 from config import dense_index as indexA
+from config import *
 import zlib
 import base64
 
@@ -36,7 +37,6 @@ def get_existing_vectors(index):
 
 def index_pdf(texts):
     """Indexe les textes dans l'index dense en évitant les doublons."""
-    model = SentenceTransformer('intfloat/multilingual-e5-large')
     vectors = model.encode(texts)
 
     # Récupérer les textes déjà indexés
@@ -73,7 +73,6 @@ def index_pdf(texts):
 
 def retrieve_documents(query, k, similarity_threshold):
     """Récupère les documents pertinents en fonction de la requête."""
-    model = SentenceTransformer('intfloat/multilingual-e5-large')
     query_vector = model.encode([query]).tolist()[0]
     results = indexA.query(
         vector=query_vector,
@@ -82,13 +81,27 @@ def retrieve_documents(query, k, similarity_threshold):
     )
 
     relevant_docs = []
+    total_words = 0
+    total_tokens = 0
     for match in results.get("matches", []):
         if "metadata" in match and "compressed_text" in match["metadata"]:
             score = match.get("score", 0)  # Score de similarité
             if score >= similarity_threshold:  # Filtrer par seuil
                 compressed_text = match["metadata"]["compressed_text"]
-
+                text = decompress_text(compressed_text)
+                relevant_docs.append(text)
+
+                # Calcul du nombre de mots et de tokens
+                total_words += len(text.split())  # Nombre de mots (séparés par des espaces)
+                total_tokens += len(model.tokenizer.encode(text))  # Nombre de tokens
+
         else:
             print(f"Skipping match due to missing metadata or compressed_text: {match}")
-
+    num_docs = len(relevant_docs)
+    avg_words_per_doc = total_words / num_docs if num_docs > 0 else 0
+    avg_tokens_per_doc = total_tokens / num_docs if num_docs > 0 else 0
+
+    print(f"Nombre de documents récupérés : {num_docs}")
+    print(f"Moyenne de mots par document : {avg_words_per_doc:.2f}")
+    print(f"Moyenne de tokens par document : {avg_tokens_per_doc:.2f}")
    return relevant_docs
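The new statistics rely on the SentenceTransformer exposing its underlying Hugging Face tokenizer as model.tokenizer, as used in the diff above. A standalone sketch of the same word/token computation (the sample text is illustrative):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("intfloat/multilingual-e5-large")
text = "An example of a retrieved document."
num_words = len(text.split())                    # whitespace-separated word count
num_tokens = len(model.tokenizer.encode(text))   # token count from the model's tokenizer
print(f"{num_words} words, {num_tokens} tokens")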
pinecone_utilsB.py
CHANGED
@@ -1,8 +1,6 @@
-from sentence_transformers import SentenceTransformer
-from pinecone_text.sparse import BM25Encoder
-from langchain.embeddings import HuggingFaceEmbeddings
 import streamlit as st
 from config import sparse_index as indexB
+from config import *
 import nltk
 import zlib
 import base64
@@ -12,11 +10,6 @@ import uuid
 
 nltk.download('punkt_tab')
 
-# Initialiser les modèles et encodeurs
-model = SentenceTransformer("intfloat/multilingual-e5-large")
-sparse_encoder = BM25Encoder().default()
-embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
-
 # Initialiser l'état de session pour Streamlit
 if "bm25_corpus" not in st.session_state:
     st.session_state.bm25_corpus = []
@@ -133,6 +126,9 @@ def hybrid_search(query, alpha, k, similarity_threshold):
 
         # Récupérer les documents pertinents
        relevant_docs = []
+        total_words = 0
+        total_tokens = 0
+
         for match in results.get("matches", []):
             if "metadata" in match and "compressed_text" in match["metadata"]:
                 score = match.get("score", 0)  # Score de similarité
@@ -141,17 +137,32 @@ def hybrid_search(query, alpha, k, similarity_threshold):
                     sparse_values_json = match["metadata"].get("sparse_values")
 
                     # Désérialiser les valeurs sparse si elles existent
-                    if sparse_values_json
-                        sparse_values = json.loads(sparse_values_json)
+                    sparse_values = json.loads(sparse_values_json) if sparse_values_json else None
 
+                    # Décompression du texte
+                    text = decompress_text(compressed_text)
                     relevant_docs.append({
-                        "text":
+                        "text": text,
                         "sparse_values": sparse_values,
                         "score": score
                     })
+
+                    # Calcul du nombre de mots et de tokens
+                    total_words += len(text.split())  # Nombre de mots (séparés par des espaces)
+                    total_tokens += len(model.tokenizer.encode(text))  # Nombre de tokens
+
             else:
                 print(f"Skipping match due to missing metadata or compressed_text: {match}")
-
+
+        # Calcul des moyennes
+        num_docs = len(relevant_docs)
+        avg_words_per_doc = total_words / num_docs if num_docs > 0 else 0
+        avg_tokens_per_doc = total_tokens / num_docs if num_docs > 0 else 0
+
+        print(f"Nombre de documents récupérés : {num_docs}")
+        print(f"Moyenne de mots par document : {avg_words_per_doc:.2f}")
+        print(f"Moyenne de tokens par document : {avg_tokens_per_doc:.2f}")
+
         return relevant_docs
 
     except Exception as e:
@@ -159,6 +170,7 @@ def hybrid_search(query, alpha, k, similarity_threshold):
         return []
 
 
+
 def compress_text(text):
     """Compresse un texte en base64."""
     compressed = zlib.compress(text.encode("utf-8"))
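Both retrieval paths call a decompress_text helper that does not appear in this diff. Given the compress_text shown above (zlib compression of UTF-8 text, then base64 per its docstring), its counterpart presumably looks like the following sketch (an assumption, not code from this commit):

import zlib
import base64

def decompress_text(compressed_text):
    """Inverse of compress_text: base64-decode, then zlib-decompress back to a UTF-8 string."""
    raw = base64.b64decode(compressed_text)       # undo the base64 step assumed in compress_text
    return zlib.decompress(raw).decode("utf-8")   # undo the zlib compression and restore the text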