Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -115,25 +115,46 @@ class QASystem:
|
|
| 115 |
graph_builder = StateGraph(MessagesState)
|
| 116 |
|
| 117 |
def query_or_respond(state: MessagesState):
|
| 118 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
return {"messages": [response]}
|
| 120 |
|
| 121 |
def generate(state: MessagesState):
|
| 122 |
-
|
| 123 |
-
|
|
|
|
|
|
|
| 124 |
system_prompt = (
|
| 125 |
-
"You are a senior legal assistant
|
| 126 |
-
"
|
| 127 |
-
f"{
|
| 128 |
)
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
]
|
| 133 |
-
|
| 134 |
response = llm.invoke(messages)
|
| 135 |
return {"messages": [response]}
|
| 136 |
|
|
|
|
| 137 |
graph_builder.add_node("query_or_respond", query_or_respond)
|
| 138 |
graph_builder.add_node("generate", generate)
|
| 139 |
|
|
|
|
# Build the LangGraph workflow over the shared MessagesState schema; nodes
# (query_or_respond, generate) are registered on this builder below.
graph_builder = StateGraph(MessagesState)
| 116 |
|
| 117 |
def query_or_respond(state: MessagesState):
    """Respond to the conversation with a legal-assistant framing.

    Collects any tool (retrieval) messages from the chat history, folds them
    into a system prompt, and invokes the LLM on the prompt plus the full
    history.

    Args:
        state: LangGraph ``MessagesState``; ``state["messages"]`` is the chat
            history, possibly containing tool messages with retrieved context.

    Returns:
        dict with a one-element ``messages`` list holding the LLM response.
    """
    # Tool-type messages carry retrieved document content, if any.
    tool_messages = [msg for msg in state["messages"] if msg.type == "tool"]

    # Use retrieved content when available; otherwise a generic fallback.
    context = (
        ' '.join(msg.content for msg in tool_messages)
        if tool_messages
        else "Legal knowledge system. Use Indian judiciary references."
    )

    system_prompt = (
        "You are a senior legal assistant with expertise in Indian law. "
        "Always provide legally accurate responses with references to Indian judiciary principles. "
        "If the user query is not legal-specific, still respond from a legal perspective."
        f"\n\nContext:\n{context}"
    )

    # The system prompt leads so it frames every message that follows.
    messages = [SystemMessage(content=system_prompt), *state["messages"]]

    logger.info(f"Sending to LLM: {[m.content for m in messages]}")  # Debugging log

    response = llm.invoke(messages)
    return {"messages": [response]}
|
| 138 |
|
| 139 |
def generate(state: MessagesState):
    """Generate the final answer from the conversation plus retrieved context.

    Builds a concise legal-assistant system prompt out of any tool (retrieval)
    messages in the history and invokes the LLM on the full conversation.

    Args:
        state: LangGraph ``MessagesState``; ``state["messages"]`` is the chat
            history, possibly containing tool messages with retrieved context.

    Returns:
        dict with a one-element ``messages`` list holding the LLM response.
    """
    # Fix: the previous version filtered over reversed(state["messages"]) and
    # then reversed the result again with [::-1] — a no-op double reversal.
    # A plain forward filter yields the identical list, more cheaply.
    retrieved_docs = [m for m in state["messages"] if m.type == "tool"]

    # Fall back to a generic context when nothing was retrieved.
    context = ' '.join(m.content for m in retrieved_docs) if retrieved_docs else "Legal knowledge system."

    system_prompt = (
        "You are a senior legal assistant specializing in Indian judiciary matters. "
        "Your responses MUST be legally accurate, concise (5 sentences max), and reference Indian laws when applicable."
        f"\n\nContext:\n{context}"
    )

    # Prepend the system prompt so it frames the whole conversation.
    messages = [SystemMessage(content=system_prompt)] + state["messages"]

    logger.info(f"Sending to LLM: {[m.content for m in messages]}")  # Debugging log

    response = llm.invoke(messages)
    return {"messages": [response]}
|
| 156 |
|
| 157 |
+
|
| 158 |
# Register both steps as graph nodes; edges/entry point are configured
# elsewhere in the file (outside this chunk).
graph_builder.add_node("query_or_respond", query_or_respond)
graph_builder.add_node("generate", generate)
|
| 160 |
|