Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
import os
|
| 2 |
import streamlit as st
|
|
|
|
| 3 |
from langchain.schema import HumanMessage
|
| 4 |
from langchain_groq import ChatGroq
|
| 5 |
from langgraph.graph import StateGraph, START, END
|
|
@@ -7,18 +8,24 @@ from pydantic import BaseModel
|
|
| 7 |
from langsmith import traceable
|
| 8 |
import traceback
|
| 9 |
|
| 10 |
-
# β
Load API keys
|
|
|
|
| 11 |
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
|
| 12 |
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
|
| 13 |
|
| 14 |
-
# β
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
llm = ChatGroq(groq_api_key=GROQ_API_KEY, model_name="llama3-8b-8192")
|
| 16 |
|
| 17 |
# β
Define the LegalState Model
|
| 18 |
class LegalState(BaseModel):
|
| 19 |
original_text: str
|
| 20 |
tone: str
|
| 21 |
-
complexity: int = 1
|
| 22 |
rewritten_text: str = None
|
| 23 |
summary: str = None
|
| 24 |
key_clauses: str = None
|
|
@@ -30,8 +37,7 @@ class LegalState(BaseModel):
|
|
| 30 |
comparison_result: str = None
|
| 31 |
final_report: str = None
|
| 32 |
|
| 33 |
-
# β
Function to invoke LLM
|
| 34 |
-
@traceable(name="Generate Response")
|
| 35 |
def generate_response(prompt):
|
| 36 |
try:
|
| 37 |
response = llm.invoke([HumanMessage(content=prompt)])
|
|
@@ -39,10 +45,11 @@ def generate_response(prompt):
|
|
| 39 |
except Exception as e:
|
| 40 |
return f"β Error: {str(e)}"
|
| 41 |
|
| 42 |
-
# β
Define
|
| 43 |
@traceable(name="Rewrite Legal Text")
|
| 44 |
def rewrite_text(state: LegalState):
|
| 45 |
-
prompt = f"""Rewrite this legal text in '{state.tone}' tone with complexity level {state.complexity}:
|
|
|
|
| 46 |
return {"rewritten_text": generate_response(prompt)}
|
| 47 |
|
| 48 |
@traceable(name="Summarize Legal Text")
|
|
@@ -55,14 +62,23 @@ def extract_clauses(state: LegalState):
|
|
| 55 |
|
| 56 |
@traceable(name="Final Synthesis")
|
| 57 |
def synthesizer(state: LegalState):
|
| 58 |
-
|
| 59 |
-
π **
|
| 60 |
-
|
| 61 |
-
**
|
| 62 |
-
**
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
-
# β
|
| 65 |
builder = StateGraph(LegalState)
|
|
|
|
| 66 |
builder.add_node("rewrite_text", rewrite_text)
|
| 67 |
builder.add_node("summarize_text", summarize_text)
|
| 68 |
builder.add_node("extract_clauses", extract_clauses)
|
|
@@ -82,22 +98,18 @@ st.title("π AI-Powered Legal Text Processor")
|
|
| 82 |
original_text = st.text_area("Enter Legal Text")
|
| 83 |
tone = st.radio("Select Tone", ["Formal", "Empathetic", "Neutral", "Strength-Based"])
|
| 84 |
|
| 85 |
-
if st.button("
|
| 86 |
input_data = {"original_text": original_text, "tone": tone, "complexity": 1}
|
| 87 |
result = graph.invoke(input_data)
|
| 88 |
-
|
| 89 |
-
st.
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
|
|
|
| 93 |
result = graph.invoke(input_data)
|
| 94 |
-
st.
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
st.
|
| 98 |
-
|
| 99 |
-
st.subheader("πΉ Key Clauses")
|
| 100 |
-
st.write(result["key_clauses"])
|
| 101 |
-
|
| 102 |
-
st.subheader("π Final Report")
|
| 103 |
-
st.write(result["final_report"])
|
|
|
|
| 1 |
import os
|
| 2 |
import streamlit as st
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
from langchain.schema import HumanMessage
|
| 5 |
from langchain_groq import ChatGroq
|
| 6 |
from langgraph.graph import StateGraph, START, END
|
|
|
|
| 8 |
from langsmith import traceable
|
| 9 |
import traceback
|
| 10 |
|
| 11 |
+
# ✅ Load API keys securely from a local .env file (if present).
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")

# ✅ Export keys for LangChain/LangSmith tracing.
# NOTE: `os.environ[key] = None` raises TypeError, so an unset key used to
# crash the app at import time — guard each assignment instead and let the
# downstream client report the missing credential.
if GROQ_API_KEY:
    os.environ["GROQ_API_KEY"] = GROQ_API_KEY
os.environ["LANGCHAIN_TRACING_V2"] = "true"
if LANGSMITH_API_KEY:
    os.environ["LANGCHAIN_API_KEY"] = LANGSMITH_API_KEY

# ✅ Initialize the LLM (Groq-hosted Llama3-8B).
llm = ChatGroq(groq_api_key=GROQ_API_KEY, model_name="llama3-8b-8192")
|
| 23 |
|
| 24 |
# β
Define the LegalState Model
|
| 25 |
class LegalState(BaseModel):
|
| 26 |
original_text: str
|
| 27 |
tone: str
|
| 28 |
+
complexity: int = 1
|
| 29 |
rewritten_text: str = None
|
| 30 |
summary: str = None
|
| 31 |
key_clauses: str = None
|
|
|
|
| 37 |
comparison_result: str = None
|
| 38 |
final_report: str = None
|
| 39 |
|
| 40 |
+
# β
Function to invoke LLM with error handling
|
|
|
|
| 41 |
def generate_response(prompt):
|
| 42 |
try:
|
| 43 |
response = llm.invoke([HumanMessage(content=prompt)])
|
|
|
|
| 45 |
except Exception as e:
|
| 46 |
return f"β Error: {str(e)}"
|
| 47 |
|
| 48 |
+
# ✅ Worker functions, traced in LangSmith for debugging.
@traceable(name="Rewrite Legal Text")
def rewrite_text(state: LegalState):
    """Rewrite the original legal text in the requested tone and complexity.

    Returns a partial-state dict so LangGraph merges ``rewritten_text`` back
    into the shared ``LegalState``.
    """
    request = f"""Rewrite this legal text in '{state.tone}' tone with complexity level {state.complexity}:
    {state.original_text}"""
    return {"rewritten_text": generate_response(request)}
|
| 54 |
|
| 55 |
@traceable(name="Summarize Legal Text")
|
|
|
|
| 62 |
|
| 63 |
@traceable(name="Final Synthesis")
def synthesizer(state: LegalState):
    """Assemble the per-worker outputs into one formatted Markdown report.

    Reads ``rewritten_text``, ``summary`` and ``key_clauses`` from the shared
    state and returns the combined text as ``final_report``.
    """
    report = f"""
    📜 **AI-Powered Legal Document Processing Report** 📜

    **🔹 Rewritten Legal Text:**
    **{state.rewritten_text}**

    **🔹 Summary:**
    {state.summary}

    **🔹 Key Clauses Identified:**
    {state.key_clauses}
    """
    return {"final_report": report}
|
| 78 |
|
| 79 |
+
# ✅ Build LangGraph Workflow: one StateGraph over the shared LegalState model.
builder = StateGraph(LegalState)

# Register the worker nodes; edges and compilation continue past this view.
builder.add_node("rewrite_text", rewrite_text)
builder.add_node("summarize_text", summarize_text)
builder.add_node("extract_clauses", extract_clauses)
|
|
|
|
| 98 |
# ✅ Streamlit UI — collect input, run the workflow, display the result.
original_text = st.text_area("Enter Legal Text")
tone = st.radio("Select Tone", ["Formal", "Empathetic", "Neutral", "Strength-Based"])

def _run_rewrite() -> None:
    """Invoke the LangGraph workflow on the current inputs and cache the
    rewritten text in session state (was duplicated inline for both buttons)."""
    input_data = {"original_text": original_text, "tone": tone, "complexity": 1}
    result = graph.invoke(input_data)
    st.session_state["rewritten_text"] = result.get("rewritten_text", "")

if st.button("Rewrite Text"):
    _run_rewrite()
    # Reveal the regenerate button on subsequent reruns.
    st.session_state["show_regen"] = True

# `.get()` covers both "key absent" and "key falsy" in one idiomatic check.
if st.session_state.get("show_regen"):
    if st.button("Regenerate Text"):
        _run_rewrite()

# Session state survives Streamlit's button-induced reruns, so the output
# stays visible after either button press.
if "rewritten_text" in st.session_state:
    st.subheader("Rewritten Text")
    st.write(f"**{st.session_state['rewritten_text']}**")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|