File size: 3,924 Bytes
07ea386
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
#!/usr/bin/env python3
"""End-to-end smoke test for the Legal RAG backend inference pipeline.

Exercises each stage in order -- model loading, verdict prediction, FAISS
retrieval, prompt construction, and full case evaluation -- printing progress
as it goes, then saves the final result to ``inference_result.json``.

Project-local modules (``model_loader``, ``rag_loader``, ``prompt_builder``,
``rag_service``, ``utils``) are imported lazily inside each step so that a
failure is reported against the exact stage that caused it.
"""
import sys
import json

# Fictional fact pattern fed through the whole pipeline.
TEST_CASE = """
A person named Ramesh was caught by police officers while carrying 500 grams of 
heroin in his bag during a routine check at the railway station. Upon questioning, 
he admitted that he was transporting the drugs from one city to another for 
monetary compensation. He has no prior criminal record. The substance was 
confirmed to be heroin through forensic testing.
"""


def main() -> None:
    """Run all six pipeline stages in order and persist the final result.

    Exits with status 1 (via ``sys.exit``) as soon as any stage fails, after
    printing a stage-specific error message.
    """
    print("=" * 80)
    print("LEGAL RAG BACKEND - FULL PIPELINE INFERENCE TEST")
    print("=" * 80)

    print("\n📋 TEST CASE:")
    print("-" * 80)
    print(TEST_CASE.strip())
    print("-" * 80)

    print("\n🔄 Starting inference pipeline...")
    print("\nStep 1: Loading LegalBERT model...")
    try:
        from model_loader import predictVerdict, getConfidence
        print("✓ Model loaded successfully")
    except Exception as e:
        print(f"✗ Error loading model: {e}")
        sys.exit(1)

    print("\nStep 2: Predicting verdict...")
    try:
        verdict = predictVerdict(TEST_CASE)
        confidence = getConfidence(TEST_CASE)
        print(f"✓ Verdict: {verdict.upper()}")
        print(f"✓ Confidence: {confidence:.4f} ({confidence*100:.2f}%)")
    except Exception as e:
        print(f"✗ Error in prediction: {e}")
        sys.exit(1)

    print("\nStep 3: Loading RAG system (FAISS + embeddings)...")
    try:
        from rag_loader import retrieve
        print("✓ RAG system loaded (6 FAISS indices ready)")
    except Exception as e:
        print(f"✗ Error loading RAG: {e}")
        sys.exit(1)

    print("\nStep 4: Retrieving relevant legal documents...")
    try:
        # retrieve() returns a mapping of legal-source name -> list of chunks.
        retrieved_chunks = retrieve(TEST_CASE, topK=3)
        print(f"✓ Retrieved chunks from {len(retrieved_chunks)} legal sources:")
        for source, chunks in retrieved_chunks.items():
            print(f"  • {source}: {len(chunks)} relevant documents")
    except Exception as e:
        print(f"✗ Error in retrieval: {e}")
        sys.exit(1)

    print("\nStep 5: Building comprehensive legal prompt...")
    try:
        from prompt_builder import buildPrompt
        prompt = buildPrompt(TEST_CASE, verdict, confidence, retrieved_chunks)
        print(f"✓ Prompt generated ({len(prompt)} characters)")
    except Exception as e:
        print(f"✗ Error building prompt: {e}")
        sys.exit(1)

    print("\nStep 6: Running full case evaluation...")
    try:
        from rag_service import evaluateCase
        # NOTE(review): evaluateCase() presumably re-runs prediction/retrieval
        # internally; steps 2-5 above exist to exercise each stage in isolation.
        result = evaluateCase(TEST_CASE)
        print("✓ Case evaluation complete")
    except Exception as e:
        print(f"✗ Error in evaluation: {e}")
        sys.exit(1)

    print("\n" + "=" * 80)
    print("📊 FINAL RESULTS")
    print("=" * 80)

    print(f"\n🔍 VERDICT: {result['verdict'].upper()}")
    print(f"📈 CONFIDENCE: {result['confidence']:.4f} ({result['confidence']*100:.2f}%)")

    print("\n📚 RETRIEVED LEGAL REFERENCES:")
    # Fix: import hoisted out of the per-category loop (the original re-ran
    # the import statement on every iteration).
    from utils import chunkToText
    for category, chunks in result['retrievedChunks'].items():
        print(f"\n  [{category.upper()}] - {len(chunks)} documents retrieved")
        if chunks:
            print("  Sample from top result:")
            sample = chunkToText(chunks[0])
            print(f"  {sample[:200]}..." if len(sample) > 200 else f"  {sample}")

    print("\n📝 EXPLANATION/ANALYSIS:")
    print("-" * 80)
    explanation = result['explanation']
    # Long explanations are truncated for console readability; the full text
    # still goes into the JSON dump below.
    if len(explanation) > 1500:
        print(explanation[:1500])
        print(f"\n... (truncated, full length: {len(explanation)} characters)")
    else:
        print(explanation)

    print("\n" + "=" * 80)
    print("✅ INFERENCE PIPELINE COMPLETED SUCCESSFULLY")
    print("=" * 80)

    print("\n💾 Saving detailed results to 'inference_result.json'...")
    output_data = {
        "test_case": TEST_CASE.strip(),
        "verdict": result['verdict'],
        "confidence": result['confidence'],
        "retrieved_sources": {k: len(v) for k, v in result['retrievedChunks'].items()},
        "explanation_length": len(result['explanation']),
        "full_explanation": result['explanation'],
    }

    # Fix: explicit UTF-8 encoding (the default is locale-dependent) and
    # ensure_ascii=False so non-ASCII text in the explanation is written
    # readably instead of as \uXXXX escapes.
    with open('inference_result.json', 'w', encoding='utf-8') as f:
        json.dump(output_data, f, indent=2, ensure_ascii=False)

    print("✓ Results saved to 'inference_result.json'")


if __name__ == "__main__":
    main()