initial commit
Browse files
app.py
CHANGED
|
@@ -18,12 +18,14 @@ class LegalExpert:
|
|
| 18 |
def __init__(self):
    """Build the LangChain pipeline: system prompt + user prompt -> ChatAnthropic chain."""
    self.system_prompt = self.get_system_prompt()

    # BUG FIX: the template literal was truncated ('from_template("{') —
    # restore the "{question}" placeholder so the user's question is
    # actually substituted into the prompt.
    self.user_prompt = HumanMessagePromptTemplate.from_template("{question}")

    full_prompt_template = ChatPromptTemplate.from_messages(
        [self.system_prompt, self.user_prompt]
    )

    self.chat = ChatAnthropic()

    self.chain = LLMChain(llm=self.chat, prompt=full_prompt_template)
|
|
@@ -50,7 +52,7 @@ class LegalExpert:
|
|
| 50 |
|
| 51 |
def run_chain(self, language, context, question):
    """Run the LLM chain with all three template variables filled in.

    BUG FIX: `question` was accepted but never passed to `chain.run`, so
    the "{question}" placeholder in the prompt was left unfilled.
    """
    return self.chain.run(
        language=language, context=context, question=question
    )
|
| 55 |
|
| 56 |
|
|
@@ -63,7 +65,7 @@ def retrieve_pdf_text(pdf_file):
|
|
| 63 |
|
| 64 |
|
| 65 |
# create a streamlit app
# BUG FIX: the title string literal was truncated ('st.title("') —
# restore the complete page title.
st.title("Document Explainer (that does not give advice)")

# Construct the expert once and cache it in the session so reruns reuse it.
if "LegalExpert" not in st.session_state:
    st.session_state.LegalExpert = LegalExpert()
|
|
|
|
| 18 |
def __init__(self):
    """Assemble the chat pipeline: system + user prompt feeding an Anthropic chain."""
    self.system_prompt = self.get_system_prompt()
    self.user_prompt = HumanMessagePromptTemplate.from_template("{question}")

    # Merge both message templates into one chat prompt.
    messages = [self.system_prompt, self.user_prompt]
    prompt = ChatPromptTemplate.from_messages(messages)

    # create llm from huggingfaceHub model

    self.chat = ChatAnthropic()
    self.chain = LLMChain(llm=self.chat, prompt=prompt)
|
|
|
|
| 52 |
|
| 53 |
def run_chain(self, language, context, question):
    """Invoke the chain with the language, context and question prompt variables."""
    inputs = {"language": language, "context": context, "question": question}
    return self.chain.run(**inputs)
|
| 57 |
|
| 58 |
|
|
|
|
| 65 |
|
| 66 |
|
| 67 |
# create a streamlit app
st.title("Document Explainer (that does not give advice)")

# Instantiate the expert only on the first run of this browser session;
# subsequent reruns pick it up from session state.
if "LegalExpert" not in st.session_state:
    st.session_state.LegalExpert = LegalExpert()
|