CaffeinatedCoding committed on
Commit
4910a59
·
verified ·
1 Parent(s): 0cf2bc5

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. src/agent_v2.py +61 -17
src/agent_v2.py CHANGED
@@ -54,6 +54,8 @@ def empty_case_state() -> Dict:
54
  "turn_count": 0,
55
  "facts_missing": [],
56
  "context_interpreted": False,
 
 
57
  }
58
 
59
 
@@ -159,6 +161,7 @@ NEW USER MESSAGE:
159
 
160
  Rules:
161
  - If last_response_type was "question", action_needed CANNOT be "question"
 
162
  - Extract ALL facts from user message even if implied
163
  - Update hypothesis confidence based on new evidence
164
  - search_queries must be specific legal questions for vector search"""
@@ -186,6 +189,8 @@ Rules:
186
  "urgency": "medium",
187
  "hypotheses": [{"claim": user_message[:80], "confidence": "low", "evidence": []}],
188
  "facts_extracted": {}, "facts_missing": [],
 
 
189
  "stage": "understanding", "last_response_type": last_response_type,
190
  "updated_summary": f"{summary} | {user_message[:100]}",
191
  "search_queries": [user_message[:200]],
@@ -193,6 +198,21 @@ Rules:
193
  "format_decision": "none"
194
  }
195
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
  return analysis
197
 
198
 
@@ -271,18 +291,29 @@ Active hypotheses:
271
  Missing facts: {', '.join(cs.get('facts_missing', [])) or 'none critical'}
272
  Stage: {cs.get('stage', 'intake')}"""
273
 
 
274
  interpret_instruction = ""
275
  should_interpret = analysis.get("should_interpret_context", False)
276
  if should_interpret and not cs.get("context_interpreted"):
277
- interpret_instruction = """
278
- CONTEXT REFLECTION: Before your main response, briefly (2-3 lines) reflect your understanding back to the user. Start with "Based on what you've told me..." This builds trust and confirms you've been tracking the situation."""
279
 
280
- radar_instruction = """
281
- PROACTIVE RADAR add after your main answer when user has described a real situation:
282
- Add a brief "⚡ You Should Also Know" section (3-4 lines max).
283
- Surface 1-2 related legal issues or remedies the user hasn't asked about but which are directly relevant.
284
- Example: User asked about wrongful termination → proactively mention injunction under Specific Relief Act as faster remedy.
285
- Skip this section for purely academic questions with no personal situation described."""
 
 
 
 
 
 
 
 
 
 
 
286
 
287
  summary = session.get("summary", "")
288
  last_msgs = session.get("last_3_messages", [])
@@ -291,6 +322,16 @@ Skip this section for purely academic questions with no personal situation descr
291
  for m in last_msgs[-4:]
292
  ) if last_msgs else ""
293
 
 
 
 
 
 
 
 
 
 
 
294
  user_content = f"""CONVERSATION SUMMARY:
295
  {summary if summary else "First message."}
296
 
@@ -304,19 +345,21 @@ RETRIEVED LEGAL SOURCES:
304
  USER MESSAGE: {user_message}
305
 
306
  THIS TURN:
 
307
  - Legal hypotheses: {', '.join(h['claim'] for h in analysis.get('hypotheses', [])[:3]) or 'analysing'}
308
- - Stage: {analysis.get('stage', 'understanding')}
309
  - Urgency: {analysis.get('urgency', 'medium')}
310
- - Response type: {analysis.get('action_needed', 'advice')}
311
  - Format: {analysis.get('format_decision', 'appropriate for content')}
312
  {interpret_instruction}
 
313
 
314
  Instructions:
315
- - Cite specific sources when making legal claims
 
316
  - Use your legal knowledge for reasoning and context
317
- - Format: {analysis.get('format_decision', 'use the most appropriate format for the content type')}
318
- - Opposition war-gaming: if giving strategy, include what the other side will argue
319
- {radar_instruction}"""
320
 
321
  response = _client.chat.completions.create(
322
  model="llama-3.3-70b-versatile",
@@ -324,8 +367,8 @@ Instructions:
324
  {"role": "system", "content": system_prompt},
325
  {"role": "user", "content": user_content}
326
  ],
327
- temperature=0.3,
328
- max_tokens=1500
329
  )
330
 
331
  return response.choices[0].message.content
@@ -347,6 +390,7 @@ def run_query_v2(user_message: str, session_id: str) -> Dict[str, Any]:
347
  "urgency": "medium",
348
  "hypotheses": [{"claim": user_message[:80], "confidence": "low", "evidence": []}],
349
  "facts_extracted": {}, "facts_missing": [],
 
350
  "stage": "understanding", "last_response_type": "none",
351
  "updated_summary": user_message[:200],
352
  "search_queries": [user_message[:200]],
@@ -363,7 +407,7 @@ def run_query_v2(user_message: str, session_id: str) -> Dict[str, Any]:
363
  if not search_queries:
364
  search_queries = [augmented_message]
365
 
366
- # Add queries from issue spotter legal_issues
367
  for issue in analysis.get("legal_issues", []):
368
  statutes = issue.get("relevant_statutes", [])
369
  specific = issue.get("specific_issue", "")
 
54
  "turn_count": 0,
55
  "facts_missing": [],
56
  "context_interpreted": False,
57
+ "radar_count": 0, # track how often radar has fired
58
+ "last_had_disclaimer": False,
59
  }
60
 
61
 
 
161
 
162
  Rules:
163
  - If last_response_type was "question", action_needed CANNOT be "question"
164
+ - action_needed CANNOT be the same as last_response_type
165
  - Extract ALL facts from user message even if implied
166
  - Update hypothesis confidence based on new evidence
167
  - search_queries must be specific legal questions for vector search"""
 
189
  "urgency": "medium",
190
  "hypotheses": [{"claim": user_message[:80], "confidence": "low", "evidence": []}],
191
  "facts_extracted": {}, "facts_missing": [],
192
+ "legal_issues": [],
193
+ "clarifying_question": {},
194
  "stage": "understanding", "last_response_type": last_response_type,
195
  "updated_summary": f"{summary} | {user_message[:100]}",
196
  "search_queries": [user_message[:200]],
 
198
  "format_decision": "none"
199
  }
200
 
201
+ # Hard enforce variety at code level
202
+ if analysis.get("action_needed") == last_response_type and last_response_type != "none":
203
+ fallback_map = {
204
+ "question": "partial_finding",
205
+ "advice": "observation",
206
+ "reflection": "partial_finding",
207
+ "partial_finding": "advice",
208
+ "observation": "advice",
209
+ "reassurance": "partial_finding",
210
+ "strategy": "observation",
211
+ "explanation": "advice",
212
+ }
213
+ analysis["action_needed"] = fallback_map.get(last_response_type, "advice")
214
+ logger.info(f"Variety enforcement: changed action from {last_response_type} to {analysis['action_needed']}")
215
+
216
  return analysis
217
 
218
 
 
291
  Missing facts: {', '.join(cs.get('facts_missing', [])) or 'none critical'}
292
  Stage: {cs.get('stage', 'intake')}"""
293
 
294
+ # Context interpretation — only when explicitly flagged, not every turn
295
  interpret_instruction = ""
296
  should_interpret = analysis.get("should_interpret_context", False)
297
  if should_interpret and not cs.get("context_interpreted"):
298
+ interpret_instruction = "\nBefore your main response, briefly (2 lines max) reflect your understanding back. Start with 'Based on what you've told me...' — only once, never again."
 
299
 
300
+ # Radar — suppress if fired recently
301
+ radar_count = cs.get("radar_count", 0)
302
+ turn_count = cs.get("turn_count", 0)
303
+ show_radar = (turn_count - radar_count) >= 3 # only every 3 turns
304
+ if show_radar:
305
+ cs["radar_count"] = turn_count
306
+ radar_instruction = """
307
+ PROACTIVE RADAR — add ONE brief "⚡ You Should Also Know" line (1-2 sentences only).
308
+ Surface one related legal angle the user hasn't asked about but which is directly relevant.
309
+ Skip if the response is already long or if this is a purely academic question."""
310
+ else:
311
+ radar_instruction = ""
312
+
313
+ # Disclaimer — suppress on short follow-up turns
314
+ stage = analysis.get("stage", "understanding")
315
+ show_disclaimer = stage not in ["understanding", "followup"] and turn_count % 2 == 0
316
+ disclaimer_instruction = '\nEnd with: "Note: This is not legal advice. Consult a qualified advocate for your specific situation."' if show_disclaimer else ""
317
 
318
  summary = session.get("summary", "")
319
  last_msgs = session.get("last_3_messages", [])
 
322
  for m in last_msgs[-4:]
323
  ) if last_msgs else ""
324
 
325
+ action = analysis.get("action_needed", "advice")
326
+
327
+ # For question turns — force brevity
328
+ if action == "question":
329
+ length_instruction = "\nKEEP THIS RESPONSE TO 3 SENTENCES MAXIMUM. Ask the question. Nothing else."
330
+ elif stage in ["understanding", "intake"]:
331
+ length_instruction = "\nKEEP THIS RESPONSE UNDER 150 WORDS."
332
+ else:
333
+ length_instruction = ""
334
+
335
  user_content = f"""CONVERSATION SUMMARY:
336
  {summary if summary else "First message."}
337
 
 
345
  USER MESSAGE: {user_message}
346
 
347
  THIS TURN:
348
+ - Response type: {action} — execute this type ONLY, do not mix with other types
349
  - Legal hypotheses: {', '.join(h['claim'] for h in analysis.get('hypotheses', [])[:3]) or 'analysing'}
350
+ - Stage: {stage}
351
  - Urgency: {analysis.get('urgency', 'medium')}
 
352
  - Format: {analysis.get('format_decision', 'appropriate for content')}
353
  {interpret_instruction}
354
+ {length_instruction}
355
 
356
  Instructions:
357
+ - Execute the response type "{action}" and ONLY that type this turn
358
+ - Cite specific sources only when directly relevant — not in every response
359
  - Use your legal knowledge for reasoning and context
360
+ - If giving strategy: include what the other side will argue
361
+ {radar_instruction}
362
+ {disclaimer_instruction}"""
363
 
364
  response = _client.chat.completions.create(
365
  model="llama-3.3-70b-versatile",
 
367
  {"role": "system", "content": system_prompt},
368
  {"role": "user", "content": user_content}
369
  ],
370
+ temperature=0.7, # higher = more varied, less templated
371
+ max_tokens=800 # shorter responses break the multi-section pattern
372
  )
373
 
374
  return response.choices[0].message.content
 
390
  "urgency": "medium",
391
  "hypotheses": [{"claim": user_message[:80], "confidence": "low", "evidence": []}],
392
  "facts_extracted": {}, "facts_missing": [],
393
+ "legal_issues": [], "clarifying_question": {},
394
  "stage": "understanding", "last_response_type": "none",
395
  "updated_summary": user_message[:200],
396
  "search_queries": [user_message[:200]],
 
407
  if not search_queries:
408
  search_queries = [augmented_message]
409
 
410
+ # Add queries from issue spotter
411
  for issue in analysis.get("legal_issues", []):
412
  statutes = issue.get("relevant_statutes", [])
413
  specific = issue.get("specific_issue", "")