WilliamGazeley committed
Commit · 6a2fca5
1 Parent(s): 56aa15a

Remove preprompt UI
app.py CHANGED
```diff
@@ -3,13 +3,10 @@ import huggingface_hub
 import streamlit as st
 from vllm import LLM, SamplingParams
 
-
-
-def get_system_message():
-    return """#Context:
-You are an AI-based automated expert financial advisor named IRAI. You have a comprehensive understanding of finance and investing because you have trained on a extensive dataset based on of financial news, analyst reports, books, company filings, earnings call transcripts, and finance websites.
+sys_msg = """#Context:
+You are an expert financial advisor named IRAI. You have a comprehensive understanding of finance and investing with experience and expertise in all areas of finance.
 #Objective:
-Answer questions accurately and truthfully given
+Answer questions accurately and truthfully given your current knowledge. You do not have access to up-to-date current market data; this will be available in the future.
 Style and tone:
 Please answer in a friendly and engaging manner representing a top female investment professional working at a leading investment bank.
 #Audience:
@@ -17,7 +14,6 @@ The questions will be asked by top technology executives and CFO of large fintec
 #Response:
 Answer, concise yet insightful."""
 
-
 @st.cache_resource(show_spinner=False)
 def init_llm():
     huggingface_hub.login(token=os.getenv("HF_TOKEN"))
@@ -43,35 +39,15 @@ def get_response(prompt):
         return f"An error occurred: {str(e)}"
 
 
-def get_response(prompt, custom_sys_msg):
-    try:
-        convo = [
-            {"role": "system", "content": custom_sys_msg},
-            {"role": "user", "content": prompt},
-        ]
-        prompts = [llm.get_tokenizer().apply_chat_template(convo, tokenize=False)]
-        sampling_params = SamplingParams(temperature=0.3, top_p=0.95, max_tokens=2000, stop_token_ids=[128009])
-        outputs = llm.generate(prompts, sampling_params)
-        for output in outputs:
-            return output.outputs[0].text
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
-
 def main():
     st.title("LLM-ADE 9B Demo")
 
-    # Retrieve the default system message
-    sys_msg = get_system_message()
-
-    # UI for editable preprompt
-    user_modified_sys_msg = st.text_area("Preprompt: ", value=sys_msg, height=200)
-
     input_text = st.text_area("Enter your text here:", value="", height=200)
 
     if st.button("Generate"):
         if input_text:
             with st.spinner('Generating response...'):
-                response_text = get_response(input_text, user_modified_sys_msg)
+                response_text = get_response(input_text)
                 st.write(response_text)
         else:
             st.warning("Please enter some text to generate a response.")
```
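The surviving single-argument `get_response(prompt)` is only visible as context here (its hunk header and final `except` branch). A minimal sketch of what it presumably looks like after this commit, reconstructed from the deleted two-argument twin with the new module-level `sys_msg` in place of the removed `custom_sys_msg` parameter; the actual body in the repository may differ:

```python
# Sketch only: reconstructed from the deleted get_response(prompt, custom_sys_msg)
# shown in the diff above, not taken verbatim from the repository.
def get_response(prompt):
    try:
        # Build a chat that always uses the fixed, module-level system prompt.
        convo = [
            {"role": "system", "content": sys_msg},
            {"role": "user", "content": prompt},
        ]
        # Render the conversation with the model's chat template, then sample with vLLM.
        prompts = [llm.get_tokenizer().apply_chat_template(convo, tokenize=False)]
        sampling_params = SamplingParams(
            temperature=0.3, top_p=0.95, max_tokens=2000, stop_token_ids=[128009]
        )
        outputs = llm.generate(prompts, sampling_params)
        for output in outputs:
            return output.outputs[0].text
    except Exception as e:
        return f"An error occurred: {str(e)}"
```

With the "Preprompt" text area removed, every request now goes through this fixed `sys_msg`; nothing in the UI can override the system prompt anymore.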
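`init_llm()` is likewise only partially shown: the hunk exposes the `@st.cache_resource` decorator and the Hugging Face login, but not the model it loads. A minimal sketch of how such a cached loader typically looks; `MODEL_NAME` is a hypothetical placeholder, since the checkpoint is not visible in this diff:

```python
import os

import huggingface_hub
import streamlit as st
from vllm import LLM

# Placeholder: the actual checkpoint name is not shown in this commit.
MODEL_NAME = "your-org/your-llm-ade-9b-checkpoint"

@st.cache_resource(show_spinner=False)
def init_llm():
    # Authenticate against the Hub, then build the vLLM engine once;
    # st.cache_resource keeps the instance alive across Streamlit reruns.
    huggingface_hub.login(token=os.getenv("HF_TOKEN"))
    return LLM(model=MODEL_NAME)
```

The app presumably binds the result to a module-level `llm = init_llm()`, which is the global that `get_response` references.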