Update app.py
app.py CHANGED
@@ -15,9 +15,6 @@ dataset = load_dataset("ibunescu/qa_legal_dataset_train")
 # Use a pipeline as a high-level helper
 pipe = pipeline("fill-mask", model="nlpaueb/legal-bert-base-uncased")

-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

 def respond(
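Note: the fill-mask pipeline loaded in this hunk is not called in any of the hunks shown below. For reference, a call would look like the following minimal sketch; the sentence is invented for illustration, and [MASK] is the mask token for this BERT-style checkpoint.

# Hypothetical usage of the `pipe` defined above; the example sentence is illustrative only.
predictions = pipe("The court issued a final [MASK] in the case.")
for p in predictions[:3]:
    # Each prediction dict includes 'sequence', 'score', and 'token_str'.
    print(p["token_str"], round(p["score"], 3))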
@@ -52,32 +49,27 @@ def respond(
         yield response, history + [(message, response)]

 def score_argument(argument):
-    # Keywords related to legal arguments
     merits_keywords = ["compelling", "convincing", "strong", "solid"]
     laws_keywords = ["statute", "law", "regulation", "act"]
     precedents_keywords = ["precedent", "case", "ruling", "decision"]
     verdict_keywords = ["guilty", "innocent", "verdict", "judgment"]

-    # Initialize scores
     merits_score = sum([1 for word in merits_keywords if word in argument.lower()])
     laws_score = sum([1 for word in laws_keywords if word in argument.lower()])
     precedents_score = sum([1 for word in precedents_keywords if word in argument.lower()])
     verdict_score = sum([1 for word in verdict_keywords if word in argument.lower()])
     length_score = len(argument.split())

-
-
-
-
-    verdict_value = verdict_score * 5 # Each keyword in verdict is valued at 5 points
+    merits_value = merits_score * 2
+    laws_value = laws_score * 3
+    precedents_value = precedents_score * 4
+    verdict_value = verdict_score * 5

-    # Total score: Sum of all individual scores
     total_score = merits_value + laws_value + precedents_value + verdict_value + length_score

     return total_score

 def color_code(score):
-    # Green for high score, yellow for medium, red for low
     if score > 50:
         return "green"
     elif score > 30:
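As a quick sanity check on the weights introduced above (merits ×2, laws ×3, precedents ×4, verdict ×5, plus one point per word of length), here is a worked example against the new score_argument and color_code; the sample sentence is invented for illustration.

# Illustrative only: 1 merits, 1 laws, 1 precedents and 2 verdict keywords in a 12-word argument.
sample = "This compelling statute and binding precedent support a guilty verdict here today."
# merits 1*2 + laws 1*3 + precedents 1*4 + verdict 2*5 + 12 words = 2 + 3 + 4 + 10 + 12 = 31
print(score_argument(sample))  # 31
print(color_code(31))          # falls in the score > 30 branch ("yellow", per the comment removed above)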
@@ -153,6 +145,13 @@ footer {
     color: black;
     margin: 5px;
 }
+.scroll-box {
+    max-height: 200px;
+    overflow-y: scroll;
+    border: 1px solid #ced4da;
+    padding: 10px;
+    border-radius: 4px;
+}
 """

 # Function to facilitate the conversation between the two chatbots
@@ -162,12 +161,10 @@ def chat_between_bots(system_message1, system_message2, max_tokens, temperature,
     shared_history.append(f"Prosecutor: {response1}")
     shared_history.append(f"Defense Attorney: {response2}")

-    # Ensure the responses are balanced by limiting the length
     max_length = max(len(response1), len(response2))
     response1 = response1[:max_length]
     response2 = response2[:max_length]

-    # Calculate scores and scoring matrices
     score1 = score_argument(response1)
     score2 = score_argument(response2)

@@ -179,11 +176,9 @@ def chat_between_bots(system_message1, system_message2, max_tokens, temperature,

     return response1, response2, history1, history2, shared_history, f"{response1}\n\n{response2}", prosecutor_score_color, defense_score_color

-# Function to handle PDF uploads and display them in a gallery
 def update_pdf_gallery(pdf_files):
     return pdf_files

-# Function to add message to the chatbot
 def add_message(history, message):
     for x in message["files"]:
         history.append(((x,), None))
@@ -191,7 +186,6 @@ def add_message(history, message):
         history.append((message["text"], None))
     return history, gr.MultimodalTextbox(value=None, interactive=False)

-# Bot function to simulate response
 def bot(history):
     response = "**That's cool!**"
     history[-1][1] = ""
@@ -200,7 +194,6 @@ def bot(history):
         time.sleep(0.05)
         yield history

-# Function to handle likes and dislikes
 def print_like_dislike(x: gr.LikeData):
     print(x.index, x.value, x.liked)

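The middle of bot() falls outside the hunks shown. The visible lines (clearing history[-1][1], then sleeping 0.05 s before each yield) match the character-streaming pattern from the Gradio chatbot tutorial, so presumably the elided part iterates over the response; a minimal sketch under that assumption, not the committed code:

def bot(history):
    response = "**That's cool!**"
    history[-1][1] = ""
    # Assumed body: stream the canned reply one character at a time.
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history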
@@ -210,48 +203,53 @@ with gr.Blocks(css=custom_css) as demo:
     shared_history = gr.State([])
     pdf_files = gr.State([])

-    with gr.Row():
-        with gr.Column(scale=4):
-            prosecutor_response = gr.Textbox(label="Prosecutor's Response", interactive=True)
-        with gr.Column(scale=1):
-            prosecutor_score_color = gr.HTML()
+    with gr.Tab("Argument Evaluation"):
+        message = gr.Textbox(label="Case to Argue")
+        system_message1 = gr.State("You are an expert Prosecutor. Give your best arguments for the case on behalf of the prosecution.")
+        system_message2 = gr.State("You are an expert Defense Attorney. Give your best arguments for the case on behalf of the Defense.")
+        max_tokens = gr.State(512)
+        temperature = gr.State(0.6)
+        top_p = gr.State(0.95)

-        with gr.
+        with gr.Row():
+            with gr.Column(scale=4):
+                prosecutor_response = gr.Textbox(label="Prosecutor's Response", interactive=True)
+                prosecutor_response.style(container=True).add_class("scroll-box")
+            with gr.Column(scale=1):
+                prosecutor_score_color = gr.HTML()
+
+            with gr.Column(scale=4):
+                defense_response = gr.Textbox(label="Defense Attorney's Response", interactive=True)
+                defense_response.style(container=True).add_class("scroll-box")
+            with gr.Column(scale=1):
+                defense_score_color = gr.HTML()
+
+        shared_argument = gr.Textbox(label="Case Outcome", interactive=True)
+        submit_btn = gr.Button("Argue")
+
+        submit_btn.click(chat_between_bots, inputs=[system_message1, system_message2, max_tokens, temperature, top_p, history1, history2, shared_history, message], outputs=[prosecutor_response, defense_response, history1, history2, shared_history, shared_argument, prosecutor_score_color, defense_score_color])

+    with gr.Tab("PDF Management"):
+        pdf_upload = gr.File(label="Upload Case Files (PDF)", file_types=[".pdf"])
+        pdf_gallery = gr.Gallery(label="PDF Gallery")
+        pdf_upload_btn = gr.Button("Update PDF Gallery")
+
+        pdf_upload_btn.click(update_pdf_gallery, inputs=[pdf_upload], outputs=[pdf_gallery, pdf_files])

+    with gr.Tab("Chatbot"):
+        chatbot = gr.Chatbot(
+            [],
+            elem_id="chatbot",
+            bubble_full_width=False
+        )

+        chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)

+        chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
+        bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
+        bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

+        chatbot.like(print_like_dislike, None, None)

     demo.queue()
     demo.launch()
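A note on the new styling calls: the .style(container=True).add_class("scroll-box") lines added above rely on methods that current Gradio releases do not expose (Component.style() was deprecated and later removed, and components have no documented add_class() method). The documented way to attach the .scroll-box class defined in custom_css is the elem_classes argument; a minimal sketch under that assumption:

# Sketch: attach the custom CSS class via elem_classes instead of .style()/.add_class().
# Assumes the same custom_css string (containing the .scroll-box rule) is passed to gr.Blocks(css=custom_css).
prosecutor_response = gr.Textbox(label="Prosecutor's Response", interactive=True, elem_classes=["scroll-box"])
defense_response = gr.Textbox(label="Defense Attorney's Response", interactive=True, elem_classes=["scroll-box"])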