Rabbitt-AI committed
Commit 7341dff · verified · 1 Parent(s): a57b15e

Update app.py

Files changed (1):
  1. app.py +97 -97
app.py CHANGED
@@ -134,17 +134,17 @@ class MistralRAGChatbot:
         logging.info(f"Loaded Annoy index from {annoy_index_path}.")
         return annoy_index
 
-    # def calculate_tfidf(self, texts: List[str]) -> Tuple[np.ndarray, TfidfVectorizer]:
-    #     vectorizer = TfidfVectorizer(stop_words='english')
-    #     tfidf_matrix = vectorizer.fit_transform(texts)
-    #     logging.info("TF-IDF matrix calculated.")
-    #     return tfidf_matrix, vectorizer
-
-    # def train_word2vec(self, texts: List[str]) -> Word2Vec:
-    #     tokenized_texts = [text.split() for text in texts]
-    #     model = Word2Vec(sentences=tokenized_texts, vector_size=100, window=5, min_count=1, workers=4)
-    #     logging.info("Word2Vec model trained.")
-    #     return model
+    def calculate_tfidf(self, texts: List[str]) -> Tuple[np.ndarray, TfidfVectorizer]:
+        vectorizer = TfidfVectorizer(stop_words='english')
+        tfidf_matrix = vectorizer.fit_transform(texts)
+        logging.info("TF-IDF matrix calculated.")
+        return tfidf_matrix, vectorizer
+
+    def train_word2vec(self, texts: List[str]) -> Word2Vec:
+        tokenized_texts = [text.split() for text in texts]
+        model = Word2Vec(sentences=tokenized_texts, vector_size=100, window=5, min_count=1, workers=4)
+        logging.info("Word2Vec model trained.")
+        return model
 
     async def get_text_embedding(self, text: str, model: str = "mistral-embed") -> np.ndarray:
         try:
@@ -250,12 +250,12 @@ class MistralRAGChatbot:
         logging.debug(f"Annoy retrieval returned {len(indices)} documents.")
         return indices, scores
 
-    # def retrieve_with_tfidf(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-    #     query_vec = self.tfidf_vectorizer.transform([user_query])
-    #     similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()
-    #     indices = np.argsort(-similarities)[:top_k]
-    #     logging.debug(f"TF-IDF retrieval returned {len(indices)} documents.")
-    #     return indices, similarities[indices].tolist()
+    def retrieve_with_tfidf(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+        query_vec = self.tfidf_vectorizer.transform([user_query])
+        similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()
+        indices = np.argsort(-similarities)[:top_k]
+        logging.debug(f"TF-IDF retrieval returned {len(indices)} documents.")
+        return indices, similarities[indices].tolist()
 
     def retrieve_with_bm25(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
         tokenized_query = user_query.split()
@@ -264,38 +264,38 @@ class MistralRAGChatbot:
         logging.debug(f"BM25 retrieval returned {len(indices)} documents.")
         return indices, scores[indices].tolist()
 
-    # def retrieve_with_word2vec(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-    #     query_tokens = user_query.split()
-    #     query_vec = np.mean([self.word2vec_model.wv[token] for token in query_tokens if token in self.word2vec_model.wv], axis=0)
-    #     expected_dim = query_vec.shape[0]
-    #     doc_vectors = []
-    #     for doc in self.texts:
-    #         word_vectors = [self.word2vec_model.wv[token] for token in doc.split() if token in self.word2vec_model.wv]
-    #         avg_vector = np.mean(word_vectors, axis=0) if word_vectors else np.zeros(expected_dim)
-    #         doc_vectors.append(avg_vector)
-    #     doc_vectors = np.array(doc_vectors)
-    #     similarities = cosine_similarity([query_vec], doc_vectors).flatten()
-    #     indices = np.argsort(-similarities)[:top_k]
-    #     return indices, similarities[indices].tolist()
-
-    # def retrieve_with_euclidean(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-    #     distances = euclidean_distances([query_embedding], self.embeddings).flatten()
-    #     indices = np.argsort(distances)[:top_k]
-    #     logging.debug(f"Euclidean retrieval returned {len(indices)} documents.")
-    #     return indices, distances[indices].tolist()
-
-    # def retrieve_with_jaccard(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
-    #     query_set = set(user_query.lower().split())
-    #     scores = []
-    #     for doc in self.texts:
-    #         doc_set = set(doc.lower().split())
-    #         intersection = query_set.intersection(doc_set)
-    #         union = query_set.union(doc_set)
-    #         score = float(len(intersection)) / len(union) if union else 0
-    #         scores.append(score)
-    #     indices = np.argsort(-np.array(scores))[:top_k]
-    #     logging.debug(f"Jaccard retrieval returned {len(indices)} documents.")
-    #     return indices.tolist(), [scores[i] for i in indices]
+    def retrieve_with_word2vec(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+        query_tokens = user_query.split()
+        query_vec = np.mean([self.word2vec_model.wv[token] for token in query_tokens if token in self.word2vec_model.wv], axis=0)
+        expected_dim = query_vec.shape[0]
+        doc_vectors = []
+        for doc in self.texts:
+            word_vectors = [self.word2vec_model.wv[token] for token in doc.split() if token in self.word2vec_model.wv]
+            avg_vector = np.mean(word_vectors, axis=0) if word_vectors else np.zeros(expected_dim)
+            doc_vectors.append(avg_vector)
+        doc_vectors = np.array(doc_vectors)
+        similarities = cosine_similarity([query_vec], doc_vectors).flatten()
+        indices = np.argsort(-similarities)[:top_k]
+        return indices, similarities[indices].tolist()
+
+    def retrieve_with_euclidean(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+        distances = euclidean_distances([query_embedding], self.embeddings).flatten()
+        indices = np.argsort(distances)[:top_k]
+        logging.debug(f"Euclidean retrieval returned {len(indices)} documents.")
+        return indices, distances[indices].tolist()
+
+    def retrieve_with_jaccard(self, user_query: str, query_embedding: np.ndarray, top_k: int) -> Tuple[List[int], List[float]]:
+        query_set = set(user_query.lower().split())
+        scores = []
+        for doc in self.texts:
+            doc_set = set(doc.lower().split())
+            intersection = query_set.intersection(doc_set)
+            union = query_set.union(doc_set)
+            score = float(len(intersection)) / len(union) if union else 0
+            scores.append(score)
+        indices = np.argsort(-np.array(scores))[:top_k]
+        logging.debug(f"Jaccard retrieval returned {len(indices)} documents.")
+        return indices.tolist(), [scores[i] for i in indices]
 
     def rerank_documents(
         self,
@@ -312,54 +312,54 @@ class MistralRAGChatbot:
 
         return reranked_docs
 
-    # def reciprocal_rank_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
-    #     k = 60
-    #     method_ranks = {}
-    #     fused_scores = {}
-    #     for doc in docs:
-    #         method = doc['method']
-    #         if method not in method_ranks:
-    #             method_ranks[method] = {doc['index']: 1}
-    #         else:
-    #             method_ranks[method][doc['index']] = len(method_ranks[method]) + 1
-    #     for doc in docs:
-    #         idx = doc['index']
-    #         if idx not in fused_scores:
-    #             fused_scores[idx] = sum(1 / (k + rank) for method_rank in method_ranks.values() for i, rank in method_rank.items() if i == idx)
-    #     reranked_docs = sorted(docs, key=lambda x: fused_scores.get(x['index'], 0), reverse=True)
-    #     for doc in reranked_docs:
-    #         doc['rrf_score'] = fused_scores.get(doc['index'], 0)
-    #     return reranked_docs
-
-    # def weighted_score_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
-    #     method_weights = {
-    #         'annoy': 0.3,
-    #         'tfidf': 0.2,
-    #         'bm25': 0.2,
-    #         'word2vec': 0.1,
-    #         'euclidean': 0.1,
-    #         'jaccard': 0.1
-    #     }
-    #     fused_scores = {}
-    #     for doc in docs:
-    #         idx = doc['index']
-    #         if idx not in fused_scores:
-    #             fused_scores[idx] = doc['score'] * method_weights[doc['method']]
-    #         else:
-    #             fused_scores[idx] += doc['score'] * method_weights[doc['method']]
-
-    #     reranked_docs = sorted(docs, key=lambda x: fused_scores[x['index']], reverse=True)
-    #     for doc in reranked_docs:
-    #         doc['wsf_score'] = fused_scores[doc['index']]
-    #     return reranked_docs
-
-    # def semantic_similarity_reranking(self, user_query: str, docs: List[dict]) -> List[dict]:
-    #     query_embedding = np.mean([self.word2vec_model.wv[token] for token in user_query.split() if token in self.word2vec_model.wv], axis=0)
-    #     for doc in docs:
-    #         doc_embedding = np.mean([self.word2vec_model.wv[token] for token in doc['text'].split() if token in self.word2vec_model.wv], axis=0)
-    #         doc_embedding = doc_embedding if doc_embedding.shape == query_embedding.shape else np.zeros(query_embedding.shape)
-    #         doc['semantic_score'] = cosine_similarity([query_embedding], [doc_embedding])[0][0]
-    #     return sorted(docs, key=lambda x: x['semantic_score'], reverse=True)
+    def reciprocal_rank_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
+        k = 60
+        method_ranks = {}
+        fused_scores = {}
+        for doc in docs:
+            method = doc['method']
+            if method not in method_ranks:
+                method_ranks[method] = {doc['index']: 1}
+            else:
+                method_ranks[method][doc['index']] = len(method_ranks[method]) + 1
+        for doc in docs:
+            idx = doc['index']
+            if idx not in fused_scores:
+                fused_scores[idx] = sum(1 / (k + rank) for method_rank in method_ranks.values() for i, rank in method_rank.items() if i == idx)
+        reranked_docs = sorted(docs, key=lambda x: fused_scores.get(x['index'], 0), reverse=True)
+        for doc in reranked_docs:
+            doc['rrf_score'] = fused_scores.get(doc['index'], 0)
+        return reranked_docs
+
+    def weighted_score_fusion(self, user_query: str, docs: List[dict]) -> List[dict]:
+        method_weights = {
+            'annoy': 0.3,
+            'tfidf': 0.2,
+            'bm25': 0.2,
+            'word2vec': 0.1,
+            'euclidean': 0.1,
+            'jaccard': 0.1
+        }
+        fused_scores = {}
+        for doc in docs:
+            idx = doc['index']
+            if idx not in fused_scores:
+                fused_scores[idx] = doc['score'] * method_weights[doc['method']]
+            else:
+                fused_scores[idx] += doc['score'] * method_weights[doc['method']]
+
+        reranked_docs = sorted(docs, key=lambda x: fused_scores[x['index']], reverse=True)
+        for doc in reranked_docs:
+            doc['wsf_score'] = fused_scores[doc['index']]
+        return reranked_docs
+
+    def semantic_similarity_reranking(self, user_query: str, docs: List[dict]) -> List[dict]:
+        query_embedding = np.mean([self.word2vec_model.wv[token] for token in user_query.split() if token in self.word2vec_model.wv], axis=0)
+        for doc in docs:
+            doc_embedding = np.mean([self.word2vec_model.wv[token] for token in doc['text'].split() if token in self.word2vec_model.wv], axis=0)
+            doc_embedding = doc_embedding if doc_embedding.shape == query_embedding.shape else np.zeros(query_embedding.shape)
+            doc['semantic_score'] = cosine_similarity([query_embedding], [doc_embedding])[0][0]
+        return sorted(docs, key=lambda x: x['semantic_score'], reverse=True)
 
     def build_prompt(self, context: str, user_query: str, response_style: str) -> str:
         styles = {
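
For reference, the reciprocal_rank_fusion method re-enabled in this commit follows the standard Reciprocal Rank Fusion formula: each document's fused score is the sum of 1 / (k + rank) over the retrieval methods that returned it, with k = 60. A minimal, self-contained sketch of that formula on toy rankings follows; the retriever names and document ids below are illustrative only and are not taken from app.py.

from collections import defaultdict
from typing import Dict, List

def rrf(rankings: Dict[str, List[int]], k: int = 60) -> List[int]:
    # rankings maps each retrieval method name to its document ids, best first.
    # Every document accumulates 1 / (k + rank) from each list it appears in.
    fused = defaultdict(float)
    for ranked_ids in rankings.values():
        for rank, doc_id in enumerate(ranked_ids, start=1):
            fused[doc_id] += 1.0 / (k + rank)
    return sorted(fused, key=fused.get, reverse=True)

# Toy rankings from three hypothetical retrievers.
print(rrf({"annoy": [3, 1, 7], "bm25": [1, 3, 9], "tfidf": [9, 1, 3]}))
# -> [1, 3, 9, 7]: document 1 wins because every retriever ranks it near the top.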