Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import requests
|
| 3 |
+
from datasets import load_dataset
|
| 4 |
+
from transformers import pipeline
|
| 5 |
+
|
| 6 |
+
# Corpus of Indian-law Q/A pairs; used both for direct lookup and as the
# context fed to the extractive QA model. Only the 'train' split exists.
dataset = load_dataset("viber1/indian-law-dataset")['train']

# Extractive question-answering pipeline (RoBERTa fine-tuned on SQuAD2).
qa_model = pipeline("question-answering", model="deepset/roberta-base-squad2")
|
| 11 |
+
|
| 12 |
+
def get_answer_from_api(query):
    """Search the CourtListener API for *query* and return the top case name.

    Parameters
    ----------
    query : str
        Free-text search string.

    Returns
    -------
    str | None
        The `case_name` of the first search result, or None when there are
        no results or the HTTP request fails (best-effort behaviour).
    """
    import os  # local import keeps this fix self-contained

    base_url = "https://www.courtlistener.com/api/rest/v4/search/"
    # SECURITY: never commit API tokens to source control. Prefer the
    # COURTLISTENER_API_TOKEN environment variable; the inline fallback only
    # preserves the original behaviour and should be revoked/removed.
    token = os.environ.get(
        "COURTLISTENER_API_TOKEN",
        "9c70738ed9eb3cce4f3782a91c7c8a218c180b89",
    )
    headers = {
        "Authorization": f"Token {token}"
    }
    params = {
        "q": query,
        "page_size": 1  # Limit the number of results returned
    }

    try:
        # timeout added: a request without one can hang the app indefinitely
        response = requests.get(base_url, headers=headers, params=params, timeout=15)
        response.raise_for_status()  # Raise an error for bad responses
        results = response.json()

        # The v4 search response exposes a total count plus a results list.
        if results.get('count', 0) > 0:
            # NOTE(review): 'case_name' field assumed from the v4 schema — confirm
            return results['results'][0]['case_name']
        else:
            return None  # No results found
    except requests.RequestException as e:
        print(f"API request failed: {e}")  # Print the error message for debugging
        return None  # Return None if there was an error
|
| 36 |
+
|
| 37 |
+
def get_answer_from_dataset(query):
    """Return the stored Response for the first dataset row whose
    Instruction contains *query* (case-insensitive), or None if no
    row matches."""
    needle = query.lower()
    return next(
        (row['Response'] for row in dataset if needle in row['Instruction'].lower()),
        None,  # no matching instruction found
    )
|
| 43 |
+
|
| 44 |
+
def get_answer_from_model(query):
    """Run extractive QA for *query* over all dataset responses.

    The context is every Response concatenated into one string; the model's
    answer is returned only when its confidence score exceeds 0.2, else None.
    """
    corpus = " ".join(entry['Response'] for entry in dataset)
    prediction = qa_model(question=query, context=corpus)
    if prediction['score'] > 0.2:
        return prediction['answer']
    return None  # confidence too low to trust the extraction
|
| 49 |
+
|
| 50 |
+
def respond(query):
    """Answer a legal query, trying sources from cheapest to most expensive.

    Order: dataset substring lookup -> extractive QA model -> CourtListener
    API. (The original inline comments described the stages in the wrong
    order — dataset/model/API were each mislabeled — and are corrected here.)

    Parameters
    ----------
    query : str
        The user's question from the Gradio text box.

    Returns
    -------
    str | None
        The first non-empty answer found, or None if all three sources fail.
    """
    # 1) Direct lookup in the curated Q/A dataset.
    answer = get_answer_from_dataset(query)
    if answer:
        return answer

    # 2) Fall back to the extractive QA model over the dataset corpus.
    answer = get_answer_from_model(query)
    if answer:
        return answer

    # 3) Last resort: search the CourtListener API.
    return get_answer_from_api(query)
|
| 63 |
+
|
| 64 |
+
# Gradio interface
|
| 65 |
+
# Gradio front end: one free-text input box, one text output.
demo = gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    title="AI Legal Assistant",
    description="Ask your legal queries regarding Indian laws",
)
|
| 72 |
+
|
| 73 |
+
# Script entry point. Fixed: the original read `_name_ == "_main_"`
# (single underscores — likely markdown-stripped), which raises
# NameError at runtime because `_name_` is undefined.
if __name__ == "__main__":
    demo.launch()
|