Update app.py
app.py CHANGED

@@ -9,7 +9,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # App framework
 st.title('🦜Seon\'s Legal QA For Dummies 🔗 ')
-
+
 
 
 offload_folder = 'C:\model_weights'
@@ -105,11 +105,10 @@ instruction = "Convert the following input text from stupid to legally reasoned
 template = get_prompt(instruction, system_prompt)
 print(template)
 
-
+prompt = PromptTemplate(template=template, input_variables=["text"])
 
 llm_chain = LLMChain(prompt=prompt, llm=llm)
 
-
 text = st.text_input('Plug in your prompt here')
 # Instantiate the prompt template # this will show stuff to the screen if there's a prompt
 
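The substantive change is in the second hunk: `prompt` is now built with `PromptTemplate` before `LLMChain(prompt=prompt, llm=llm)` consumes it. The removed lines render as blank in the diff view, so the previous revision apparently had no `prompt` definition at this point, in which case the chain construction would have raised a `NameError` unless `prompt` was defined elsewhere. Below is a minimal sketch of how the patched section fits together, not the app's actual code: the `get_prompt` helper body, the `system_prompt` value, and the placeholder LLM are assumptions standing in for definitions outside this diff, and the `instruction` string is kept exactly as truncated in the hunk header.

# Minimal sketch of the patched flow; the real app builds `llm` from
# AutoModelForCausalLM/AutoTokenizer, which this diff does not show.
import streamlit as st
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM  # stand-in for the app's real LLM

# Assumed shape of the app's helper: wraps the instruction and system prompt
# into one template string containing the {text} placeholder.
def get_prompt(instruction: str, system_prompt: str) -> str:
    return f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{instruction}\n{{text}} [/INST]"

system_prompt = "You are a careful legal assistant."  # hypothetical value
instruction = "Convert the following input text from stupid to legally reasoned"  # truncated in the hunk header

st.title('🦜Seon\'s Legal QA For Dummies 🔗 ')

template = get_prompt(instruction, system_prompt)
print(template)

# The commit's fix: define `prompt` before LLMChain consumes it.
prompt = PromptTemplate(template=template, input_variables=["text"])

llm = FakeListLLM(responses=["(model output would appear here)"])  # placeholder
llm_chain = LLMChain(prompt=prompt, llm=llm)

text = st.text_input('Plug in your prompt here')
if text:  # this will show stuff to the screen if there's a prompt
    st.write(llm_chain.run(text))

One unrelated nit visible in the first hunk: `'C:\model_weights'` only works because `\m` happens not to be a recognized escape sequence; a raw string (`r'C:\model_weights'`) is the safer way to spell Windows paths.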