File size: 4,047 Bytes
fbf4182
bf9641a
f9d767c
 
 
 
fbf4182
f9d767c
 
 
fbf4182
f9d767c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fbf4182
 
f9d767c
fbf4182
 
 
 
 
f9d767c
 
 
fbf4182
f9d767c
 
 
 
fbf4182
f9d767c
 
 
 
 
 
 
fbf4182
f9d767c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fbf4182
f9d767c
 
fbf4182
f9d767c
fbf4182
f9d767c
 
 
 
 
fbf4182
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
from kanon_api import search_cases, get_case_content  # now async versions
from vectorstore import create_vector_store
from google import genai
import os
import re
import json
import asyncio

# Module-level Gemini client; reads GOOGLE_API_KEY from the environment at
# import time (genai.Client raises later if the key is missing/invalid).
client = genai.Client(api_key=os.getenv("GOOGLE_API_KEY"))

async def predict_outcome(user_case: str):
    """
    Predict the likely outcome of a legal case using AI.

    Pipeline:
      1. Ask Gemini to turn the case facts into one precise search query.
      2. Search Indian Kanoon (async) for related cases.
      3. Fetch full case texts concurrently, tolerating individual failures.
      4. Build a vector store over the texts and retrieve relevant context.
      5. Ask Gemini for a structured JSON prediction and parse it.

    Args:
        user_case: Free-text description of the case facts.

    Returns:
        dict with keys "probability", "timeline" and "feature_points" on
        success, or a dict with an "error" key (and possibly
        "raw_response") describing the failure.
    """

    # 1️⃣ Generate legal search query
    search_prompt = f"""
You are an expert Indian legal AI assistant.
Given these case facts, generate a precise **search query** suitable for finding relevant Indian legal cases on a legal database like Indian Kanoon.

Case facts:
{user_case}

Requirements:
- Output **only one line** in natural language.
- Include **relevant Indian laws, sections, or keywords** if applicable.
- Make it precise for legal search; do **not** use generic phrases.
- Return **only the query**, nothing else, no explanation.
- Do not wrap the query in quotes, ellipses, or filler words; respond with exactly the one line that best matches the case.

Example output:
"Liability for defective vehicles and accident compensation."
"About compensation for deaths and injuries due to a road accident caused by a vehicle defect"
"""
    search_chat = client.chats.create(model="gemini-2.5-flash-lite")
    # send_message is a blocking network call; run it in a worker thread so
    # it does not stall the event loop of this coroutine.
    query_response = await asyncio.to_thread(search_chat.send_message, search_prompt)
    query = query_response.text.strip().replace("\n", " ").strip('"').strip("'")
    print("Generated legal search query:", query)

    # 2️⃣ Search related cases (async); bail out early if nothing matched.
    related_cases_data = await search_cases(query, max_results=10)
    if not related_cases_data:
        return {"error": "No relevant cases found to analyze."}

    # 3️⃣ Fetch full text for each result concurrently. Skip entries without
    # a "url" key, and use return_exceptions so one failed fetch does not
    # abort the whole prediction — failures simply yield no text.
    cases_with_urls = [case for case in related_cases_data if case.get("url")]
    texts = await asyncio.gather(
        *(get_case_content(case["url"]) for case in cases_with_urls),
        return_exceptions=True,
    )
    for case, text in zip(cases_with_urls, texts):
        case["text"] = None if isinstance(text, BaseException) else text

    related_cases_texts = [case["text"] for case in related_cases_data if case.get("text")]
    if not related_cases_texts:
        return {"error": "No relevant cases found to analyze."}

    # 4️⃣ Create vector store
    vectorstore = create_vector_store(related_cases_texts)
    if not vectorstore:
        return {"error": "Vector store creation failed."}

    # 5️⃣ Retrieve relevant cases
    retriever = vectorstore.as_retriever()
    relevant_docs = retriever.invoke(user_case)
    combined_text = "\n".join([d.page_content for d in relevant_docs])

    if not combined_text.strip():
        return {"error": "No relevant context could be found from retrieved cases."}

    # 6️⃣ Generate final prediction
    prompt = f"""
You are an expert Indian legal AI assistant.
User case facts:
{user_case}

Consider these previous cases:
{combined_text}

Return the output strictly as JSON with the following keys:
- "probability": estimated percentage chance of winning the case (number between 0-100)
- "timeline": approximate duration or end period of the case based on similar past cases
- "feature_points": list of key points favoring win/loss and any major influencing factors

Example JSON:
{{
  "probability": 75,
  "timeline": "6-12 months",
  "feature_points": [
    "Plaintiff has strong documentary evidence",
    "Defendant has prior similar case loss",
    "Possible delay due to procedural issues"
  ]
}}
Do **not** include any explanation outside the JSON.
"""
    chat = client.chats.create(model="gemini-2.0-flash-exp")
    # Same blocking-call concern as above — offload to a thread.
    response = await asyncio.to_thread(chat.send_message, prompt)

    raw_text = response.text.strip()

    # Strip Markdown code fences (```json ... ``` or bare ```), tolerating
    # whitespace/newlines right after the opening and before the closing fence.
    raw_text = re.sub(r"^```(?:json)?\s*|\s*```$", "", raw_text, flags=re.IGNORECASE).strip()
    # Unwrap a response the model quoted as a whole, un-escaping inner quotes.
    if (raw_text.startswith('"') and raw_text.endswith('"')) or (raw_text.startswith("'") and raw_text.endswith("'")):
        raw_text = raw_text[1:-1].strip().replace('\\"', '"').replace("\\'", "'")

    # Parse JSON; surface the raw model output on failure for debugging.
    try:
        result_json = json.loads(raw_text)
    except json.JSONDecodeError:
        result_json = {"error": "AI did not return valid JSON", "raw_response": raw_text}

    return result_json