CaffeinatedCoding committed on
Commit
d484eff
·
verified ·
1 Parent(s): 4910a59

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. src/agent_v2.py +17 -59
src/agent_v2.py CHANGED
@@ -54,8 +54,6 @@ def empty_case_state() -> Dict:
54
  "turn_count": 0,
55
  "facts_missing": [],
56
  "context_interpreted": False,
57
- "radar_count": 0, # track how often radar has fired
58
- "last_had_disclaimer": False,
59
  }
60
 
61
 
@@ -161,7 +159,6 @@ NEW USER MESSAGE:
161
 
162
  Rules:
163
  - If last_response_type was "question", action_needed CANNOT be "question"
164
- - action_needed CANNOT be the same as last_response_type
165
  - Extract ALL facts from user message even if implied
166
  - Update hypothesis confidence based on new evidence
167
  - search_queries must be specific legal questions for vector search"""
@@ -189,8 +186,7 @@ Rules:
189
  "urgency": "medium",
190
  "hypotheses": [{"claim": user_message[:80], "confidence": "low", "evidence": []}],
191
  "facts_extracted": {}, "facts_missing": [],
192
- "legal_issues": [],
193
- "clarifying_question": {},
194
  "stage": "understanding", "last_response_type": last_response_type,
195
  "updated_summary": f"{summary} | {user_message[:100]}",
196
  "search_queries": [user_message[:200]],
@@ -198,21 +194,6 @@ Rules:
198
  "format_decision": "none"
199
  }
200
 
201
- # Hard enforce variety at code level
202
- if analysis.get("action_needed") == last_response_type and last_response_type != "none":
203
- fallback_map = {
204
- "question": "partial_finding",
205
- "advice": "observation",
206
- "reflection": "partial_finding",
207
- "partial_finding": "advice",
208
- "observation": "advice",
209
- "reassurance": "partial_finding",
210
- "strategy": "observation",
211
- "explanation": "advice",
212
- }
213
- analysis["action_needed"] = fallback_map.get(last_response_type, "advice")
214
- logger.info(f"Variety enforcement: changed action from {last_response_type} to {analysis['action_needed']}")
215
-
216
  return analysis
217
 
218
 
@@ -291,29 +272,18 @@ Active hypotheses:
291
  Missing facts: {', '.join(cs.get('facts_missing', [])) or 'none critical'}
292
  Stage: {cs.get('stage', 'intake')}"""
293
 
294
- # Context interpretation — only when explicitly flagged, not every turn
295
  interpret_instruction = ""
296
  should_interpret = analysis.get("should_interpret_context", False)
297
  if should_interpret and not cs.get("context_interpreted"):
298
- interpret_instruction = "\nBefore your main response, briefly (2 lines max) reflect your understanding back. Start with 'Based on what you've told me...' — only once, never again."
 
299
 
300
- # Radar — suppress if fired recently
301
- radar_count = cs.get("radar_count", 0)
302
- turn_count = cs.get("turn_count", 0)
303
- show_radar = (turn_count - radar_count) >= 3 # only every 3 turns
304
- if show_radar:
305
- cs["radar_count"] = turn_count
306
- radar_instruction = """
307
- PROACTIVE RADAR — add ONE brief "⚡ You Should Also Know" line (1-2 sentences only).
308
- Surface one related legal angle the user hasn't asked about but which is directly relevant.
309
- Skip if the response is already long or if this is a purely academic question."""
310
- else:
311
- radar_instruction = ""
312
-
313
- # Disclaimer — suppress on short follow-up turns
314
- stage = analysis.get("stage", "understanding")
315
- show_disclaimer = stage not in ["understanding", "followup"] and turn_count % 2 == 0
316
- disclaimer_instruction = '\nEnd with: "Note: This is not legal advice. Consult a qualified advocate for your specific situation."' if show_disclaimer else ""
317
 
318
  summary = session.get("summary", "")
319
  last_msgs = session.get("last_3_messages", [])
@@ -322,16 +292,6 @@ Skip if the response is already long or if this is a purely academic question.""
322
  for m in last_msgs[-4:]
323
  ) if last_msgs else ""
324
 
325
- action = analysis.get("action_needed", "advice")
326
-
327
- # For question turns — force brevity
328
- if action == "question":
329
- length_instruction = "\nKEEP THIS RESPONSE TO 3 SENTENCES MAXIMUM. Ask the question. Nothing else."
330
- elif stage in ["understanding", "intake"]:
331
- length_instruction = "\nKEEP THIS RESPONSE UNDER 150 WORDS."
332
- else:
333
- length_instruction = ""
334
-
335
  user_content = f"""CONVERSATION SUMMARY:
336
  {summary if summary else "First message."}
337
 
@@ -345,21 +305,19 @@ RETRIEVED LEGAL SOURCES:
345
  USER MESSAGE: {user_message}
346
 
347
  THIS TURN:
348
- - Response type: {action} — execute this type ONLY, do not mix with other types
349
  - Legal hypotheses: {', '.join(h['claim'] for h in analysis.get('hypotheses', [])[:3]) or 'analysing'}
350
- - Stage: {stage}
351
  - Urgency: {analysis.get('urgency', 'medium')}
 
352
  - Format: {analysis.get('format_decision', 'appropriate for content')}
353
  {interpret_instruction}
354
- {length_instruction}
355
 
356
  Instructions:
357
- - Execute the response type "{action}" and ONLY that type this turn
358
- - Cite specific sources only when directly relevant — not in every response
359
  - Use your legal knowledge for reasoning and context
360
- - If giving strategy: include what the other side will argue
361
- {radar_instruction}
362
- {disclaimer_instruction}"""
363
 
364
  response = _client.chat.completions.create(
365
  model="llama-3.3-70b-versatile",
@@ -367,8 +325,8 @@ Instructions:
367
  {"role": "system", "content": system_prompt},
368
  {"role": "user", "content": user_content}
369
  ],
370
- temperature=0.7, # higher = more varied, less templated
371
- max_tokens=800 # shorter responses break the multi-section pattern
372
  )
373
 
374
  return response.choices[0].message.content
 
54
  "turn_count": 0,
55
  "facts_missing": [],
56
  "context_interpreted": False,
 
 
57
  }
58
 
59
 
 
159
 
160
  Rules:
161
  - If last_response_type was "question", action_needed CANNOT be "question"
 
162
  - Extract ALL facts from user message even if implied
163
  - Update hypothesis confidence based on new evidence
164
  - search_queries must be specific legal questions for vector search"""
 
186
  "urgency": "medium",
187
  "hypotheses": [{"claim": user_message[:80], "confidence": "low", "evidence": []}],
188
  "facts_extracted": {}, "facts_missing": [],
189
+ "legal_issues": [], "clarifying_question": {},
 
190
  "stage": "understanding", "last_response_type": last_response_type,
191
  "updated_summary": f"{summary} | {user_message[:100]}",
192
  "search_queries": [user_message[:200]],
 
194
  "format_decision": "none"
195
  }
196
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
  return analysis
198
 
199
 
 
272
  Missing facts: {', '.join(cs.get('facts_missing', [])) or 'none critical'}
273
  Stage: {cs.get('stage', 'intake')}"""
274
 
 
275
  interpret_instruction = ""
276
  should_interpret = analysis.get("should_interpret_context", False)
277
  if should_interpret and not cs.get("context_interpreted"):
278
+ interpret_instruction = """
279
+ CONTEXT REFLECTION: Before your main response, briefly (2-3 lines) reflect your understanding back to the user. Start with "Based on what you've told me..." This builds trust and confirms you've been tracking the situation."""
280
 
281
+ radar_instruction = """
282
+ PROACTIVE RADAR add after your main answer when user has described a real situation:
283
+ Add a brief "⚡ You Should Also Know" section (3-4 lines max).
284
+ Surface 1-2 related legal issues or remedies the user hasn't asked about but which are directly relevant.
285
+ Example: User asked about wrongful termination → proactively mention injunction under Specific Relief Act as faster remedy.
286
+ Skip this section for purely academic questions with no personal situation described."""
 
 
 
 
 
 
 
 
 
 
 
287
 
288
  summary = session.get("summary", "")
289
  last_msgs = session.get("last_3_messages", [])
 
292
  for m in last_msgs[-4:]
293
  ) if last_msgs else ""
294
 
 
 
 
 
 
 
 
 
 
 
295
  user_content = f"""CONVERSATION SUMMARY:
296
  {summary if summary else "First message."}
297
 
 
305
  USER MESSAGE: {user_message}
306
 
307
  THIS TURN:
 
308
  - Legal hypotheses: {', '.join(h['claim'] for h in analysis.get('hypotheses', [])[:3]) or 'analysing'}
309
+ - Stage: {analysis.get('stage', 'understanding')}
310
  - Urgency: {analysis.get('urgency', 'medium')}
311
+ - Response type: {analysis.get('action_needed', 'advice')}
312
  - Format: {analysis.get('format_decision', 'appropriate for content')}
313
  {interpret_instruction}
 
314
 
315
  Instructions:
316
+ - Cite specific sources when making legal claims
 
317
  - Use your legal knowledge for reasoning and context
318
+ - Format: {analysis.get('format_decision', 'use the most appropriate format for the content type')}
319
+ - Opposition war-gaming: if giving strategy, include what the other side will argue
320
+ {radar_instruction}"""
321
 
322
  response = _client.chat.completions.create(
323
  model="llama-3.3-70b-versatile",
 
325
  {"role": "system", "content": system_prompt},
326
  {"role": "user", "content": user_content}
327
  ],
328
+ temperature=0.3,
329
+ max_tokens=1500
330
  )
331
 
332
  return response.choices[0].message.content