GitHub Actions committed on
Commit
47a10a7
·
1 Parent(s): bfbc9d2

Sync from GitHub Actions

Browse files
Lawverse/evaluation/metrics.py CHANGED
@@ -35,10 +35,14 @@ class RagasMetrics:
35
 
36
  @staticmethod
37
  def f_recall(pred_answer, true_answer):
38
- tp = sum(1 for p, t in zip(pred_answer, true_answer) if t.lower() in p.lower())
39
- fn = len(true_answer) - tp
40
- recall = tp / (tp + fn + 1e-8)
41
- return round(recall, 4)
 
 
 
 
42
 
43
  def compute_all_metrics(dataset : Dataset, preds, trues, llm : BaseRagasLLM, run_config : RunConfig):
44
  ragas = RagasMetrics()
 
35
 
36
  @staticmethod
37
  def f_recall(pred_answer, true_answer):
38
+ pred_tokens = set(" ".join(pred_answer).lower().split())
39
+ true_tokens = set(" ".join(true_answer).lower().split())
40
+
41
+ tp = len(pred_tokens & true_tokens)
42
+ fn = len(true_tokens - pred_tokens)
43
+
44
+ return round(tp / (tp + fn + 1e-8), 4)
45
+
46
 
47
  def compute_all_metrics(dataset : Dataset, preds, trues, llm : BaseRagasLLM, run_config : RunConfig):
48
  ragas = RagasMetrics()
Lawverse/pipeline/llm_loader.py CHANGED
@@ -1,34 +1,4 @@
1
- from typing import Optional, List, Any
2
- from langchain_core.language_models.llms import LLM
3
- from openai import OpenAI
4
  import os
5
  from langchain_openai import ChatOpenAI
6
 
7
- # env = environ.Env()
8
- # environ.Env.read_env(Path(__file__).resolve().parent.parent.parent / ".env")
9
-
10
- # class LightningLLM(LLM):
11
- # def __init__(self, api_key: str, model: str, **kwargs):
12
- # super().__init__(**kwargs)
13
- # object.__setattr__(self, "client", OpenAI(base_url="https://lightning.ai/api/v1/", api_key=api_key))
14
- # object.__setattr__(self, "model", model)
15
-
16
- # def _call(self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional = None, **kwargs: Any) -> str:
17
-
18
- # resp = self.client.chat.completions.create(
19
- # model=self.model,
20
- # messages=[{"role": "user", "content": [{"type": "text", "text": prompt}]}],
21
- # temperature=0.1,
22
- # max_tokens=10240
23
- # )
24
- # return resp.choices[0].message.content
25
-
26
- # @property
27
- # def _identifying_params(self):
28
- # return {"model": self.model}
29
-
30
- # @property
31
- # def _llm_type(self):
32
- # return "lightning_gpt"
33
-
34
- llm = ChatOpenAI(base_url='https://lightning.ai/api/v1/', api_key=os.getenv("API_KEY"), model=os.getenv("MODEL"))
 
 
 
 
1
  import os
2
  from langchain_openai import ChatOpenAI
3
 
4
+ llm = ChatOpenAI(base_url='https://lightning.ai/api/v1/', api_key=os.getenv("API_KEY"), model="google/gemini-2.5-flash",)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Lawverse/pipeline/rag_pipeline.py CHANGED
@@ -11,8 +11,10 @@ from Lawverse.logger import logging
11
  from Lawverse.exception import ExceptionHandle
12
  from Lawverse.utils.config import FAISS_PATH
13
 
14
- from langchain_classic.chains.conversational_retrieval.base import ConversationalRetrievalChain
15
- from langchain_core.prompts import PromptTemplate
 
 
16
  import sys
17
  import pickle
18
 
@@ -117,20 +119,41 @@ def rag_components():
117
  def create_chat_chian(components, chat_id=None):
118
  try:
119
  memory_manager = ChatMemory(chat_id=chat_id)
120
- memory = memory_manager.memory
121
-
122
- chain = ConversationalRetrievalChain.from_llm(
123
- llm=llm,
124
- retriever=components["retriever"],
125
- combine_docs_chain_kwargs={"prompt": components["qa_prompt"]},
126
- memory=memory,
127
- return_source_documents=True,
128
- output_key="answer"
129
  )
130
 
131
- logging.info("RAG chain initialized successfully.")
132
- return chain, memory_manager
133
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  except Exception as e:
135
- logging.error(f"Chat chain creation failed")
136
  raise ExceptionHandle(e, sys)
 
11
  from Lawverse.exception import ExceptionHandle
12
  from Lawverse.utils.config import FAISS_PATH
13
 
14
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
15
+ from langchain_classic.chains.combine_documents import create_stuff_documents_chain
16
+ from langchain_classic.chains.retrieval import create_retrieval_chain
17
+ from langchain_classic.chains.history_aware_retriever import create_history_aware_retriever
18
  import sys
19
  import pickle
20
 
 
119
  def create_chat_chian(components, chat_id=None):
120
  try:
121
  memory_manager = ChatMemory(chat_id=chat_id)
122
+ retriever = components["retriever"]
123
+
124
+ contextualize_q_system_prompt = (
125
+ "Given a chat history and the latest user question "
126
+ "which might reference context in the chat history, "
127
+ "formulate a standalone question which can be understood "
128
+ "without the chat history. Do NOT answer the question, "
129
+ "just reformulate it if needed and otherwise return it as is."
 
130
  )
131
 
132
+ contextualize_q_prompt = ChatPromptTemplate.from_messages(
133
+ [
134
+ ("system", contextualize_q_system_prompt),
135
+ MessagesPlaceholder("chat_history"),
136
+ ("human", "{input}"),
137
+ ]
138
+ )
139
+ history_aware_retriever = create_history_aware_retriever(
140
+ llm, retriever, contextualize_q_prompt
141
+ )
142
+ template_str = components["qa_prompt"].template
143
+ if "{question}" in template_str:
144
+ template_str = template_str.replace("{question}", "{input}")
145
+
146
+ qa_prompt = ChatPromptTemplate.from_template(template_str)
147
+ question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
148
+
149
+ rag_chain = create_retrieval_chain(
150
+ history_aware_retriever, question_answer_chain
151
+ )
152
+ final_chain = rag_chain | (lambda x: x["answer"])
153
+
154
+ logging.info("LCEL RAG chain initialized successfully.")
155
+ return final_chain, memory_manager
156
+
157
  except Exception as e:
158
+ logging.error(f"Chat chain creation failed: {e}")
159
  raise ExceptionHandle(e, sys)
api/app.py CHANGED
@@ -1,5 +1,5 @@
1
  from Lawverse.pipeline.rag_pipeline import rag_components, create_chat_chian
2
- from flask import Flask, render_template, request, jsonify, session
3
  from Lawverse.utils.config import MEMORY_DIR
4
  from Lawverse.logger import logging
5
  from Lawverse.monitoring.dashboard import monitor_bp
@@ -70,13 +70,17 @@ def rag_response():
70
  if not query:
71
  return jsonify({"error": "Empty message"}), 400
72
 
73
- result = qa({"question": query})
74
- memory_manager.save_memory()
75
-
76
- answer_markdown = result.get("answer","")
77
- rendered_html = markdown.markdown(answer_markdown)
 
 
 
 
78
 
79
- return jsonify({"answer": rendered_html})
80
 
81
  except Exception as e:
82
  logging.error(f"Chat error: {e}")
 
1
  from Lawverse.pipeline.rag_pipeline import rag_components, create_chat_chian
2
+ from flask import Flask, render_template, request, jsonify, session, stream_with_context, Response
3
  from Lawverse.utils.config import MEMORY_DIR
4
  from Lawverse.logger import logging
5
  from Lawverse.monitoring.dashboard import monitor_bp
 
70
  if not query:
71
  return jsonify({"error": "Empty message"}), 400
72
 
73
+ def generate():
74
+ try:
75
+ for chunk in qa.stream({"input": query}):
76
+ yield chunk
77
+ memory_manager.save_memory()
78
+
79
+ except Exception as e:
80
+ logging.error(f"Error during stream generation: {e}")
81
+ yield f"**Error:** An error occurred while processing your request."
82
 
83
+ return Response(stream_with_context(generate()), mimetype='text/plain')
84
 
85
  except Exception as e:
86
  logging.error(f"Chat error: {e}")
templates/chat.html CHANGED
@@ -346,30 +346,44 @@
346
 
347
  addMessage(text, true);
348
  messageInput.value = "";
349
-
350
  const typing = showTyping();
351
-
352
  try {
353
- const response = await fetch("/response", {
354
- method: "POST",
355
- headers: { "Content-Type": "application/json" },
356
- body: JSON.stringify({ message: text }),
357
- });
358
-
359
- const data = await response.json();
360
- typing.remove();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
361
 
362
- if (data.answer) {
363
- addMessage(data.answer, false);
364
  loadSidebarChats();
365
- } else if (data.error) {
366
- addMessage(`⚠️ Error: ${data.error}`, false);
367
- } else {
368
- addMessage("⚠️ Unexpected response from server.", false);
369
- }
370
- } catch (error) {
371
- typing.remove();
372
- addMessage("❌ Error: Unable to connect to the server.", false);
373
  }
374
  }
375
 
 
346
 
347
  addMessage(text, true);
348
  messageInput.value = "";
 
349
  const typing = showTyping();
 
350
  try {
351
+ const response = await fetch("/response", {
352
+ method: "POST",
353
+ headers: { "Content-Type": "application/json" },
354
+ body: JSON.stringify({ message: text }),
355
+ });
356
+ const reader = response.body.getReader();
357
+ const decoder = new TextDecoder();
358
+
359
+ typing.remove();
360
+ const aiBubble = document.createElement("div");
361
+ aiBubble.className = "message-bubble ai-message";
362
+ aiBubble.innerHTML = `
363
+ <div class="flex items-start">
364
+ <div class="w-8 h-8 rounded-full bg-gradient-to-r from-blue-500 to-cyan-500 mr-3"></div>
365
+ <div>
366
+ <p class="font-semibold text-cyan-300 mb-1">Lawverse AI</p>
367
+ <div class="message-content"></div>
368
+ </div>
369
+ </div>`;
370
+ chatMessages.appendChild(aiBubble);
371
+ const contentDiv = aiBubble.querySelector(".message-content");
372
+
373
+ while (true) {
374
+ const { value, done } = await reader.read();
375
+ if (done) break;
376
+
377
+ const chunk = decoder.decode(value);
378
+ contentDiv.innerHTML += chunk;
379
+ chatMessages.scrollTop = chatMessages.scrollHeight;
380
+ }
381
 
 
 
382
  loadSidebarChats();
383
+
384
+ } catch (err) {
385
+ typing.remove();
386
+ addMessage(" Streaming failed: " + err.message, false);
 
 
 
 
387
  }
388
  }
389