davidfearne committed on
Commit
842a437
·
verified ·
1 Parent(s): 51fdff8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +132 -132
app.py CHANGED
@@ -1,133 +1,133 @@
1
- import streamlit as st
2
- from langchain_core.messages import AIMessage, HumanMessage
3
- from langchain_openai import ChatOpenAI
4
-
5
- from langchain_core.output_parsers import StrOutputParser
6
- from langchain_core.prompts import ChatPromptTemplate
7
-
8
- # from langchain.chat_models import AzureChatOpenAI
9
- from langchain_openai import AzureChatOpenAI
10
- from langchain.schema import HumanMessage, SystemMessage
11
- from langchain_core.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
12
- from azure_openai import qt
13
- from retriver import search_and_reconstruct
14
- # Initialize an instance of AzureOpenAI using the specified settings
15
- import pandas as pd
16
-
17
-
18
import os

# LLM / LangChain configuration for the Azure OpenAI deployment.
# SECURITY FIX: an Azure OpenAI API key was previously hardcoded on this line
# and committed to version control — it must be treated as leaked and rotated.
# The key is now read from the environment instead of being embedded in source.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
OPENAI_API_TYPE = "azure"
OPENAI_API_BASE = "https://davidfearn-gpt4.openai.azure.com"
# OPENAI_API_VERSION = "2024-02-01"
OPENAI_API_VERSION = "2024-08-01-preview"
# OPENAI_MODEL = "gpt4-turbo-1106"
OPENAI_MODEL = "gpt-4o"  # Azure *deployment* name, passed as deployment_name below
# Initialize an instance of AzureOpenAI using the specified settings
27
-
28
-
29
def get_response(chat_history, qte, knowledge, temp1, temp2, tokens1, tokens2, persona2SystemMessage, persona2UserMessage):
    """Stream a grounded LLM answer for the current conversation.

    Builds an AzureChatOpenAI client from the module-level OPENAI_* constants,
    composes a system + human prompt, and returns a streaming iterator of
    string chunks (consumed by st.write_stream in the page script).

    Parameters
    ----------
    chat_history : list
        AIMessage/HumanMessage objects; interpolated into the {history} slot.
    qte :
        NOTE(review): engineered query from qt() — accepted but UNUSED here;
        presumably intended to feed the prompt. Confirm intent.
    knowledge :
        Retrieval results; interpolated into the {knowledge} slot.
    temp1, tokens1 :
        NOTE(review): persona-1 (query designer) settings — UNUSED here.
    temp2 : float
        Sampling temperature for the answer model.
    tokens2 : int
        max_tokens for the answer model.
    persona2SystemMessage, persona2UserMessage :
        NOTE(review): collected from the sidebar but UNUSED — the prompt below
        is hardcoded. They were probably meant to be the templates; confirm.

    Returns
    -------
    Iterator[str]
        Streamed response chunks.
    """
    llm = AzureChatOpenAI(
        openai_api_version=OPENAI_API_VERSION,
        openai_api_key=OPENAI_API_KEY,
        azure_endpoint=OPENAI_API_BASE,
        openai_api_type=OPENAI_API_TYPE,
        deployment_name=OPENAI_MODEL,  # name of the Azure deployment to call
        temperature=temp2,
        max_tokens=tokens2,
    )

    # Fix: this prompt previously read "your are a helpful ai" — a typo sent
    # to the model on every request.
    system_message_template = SystemMessagePromptTemplate.from_template("You are a helpful AI")
    human_message_template = HumanMessagePromptTemplate.from_template("try and answer the questions {history}, knowledge {knowledge}")

    # Create a chat prompt template combining system and human messages
    prompt = ChatPromptTemplate.from_messages([system_message_template, human_message_template])

    # prompt -> model -> plain string chunks
    chain = prompt | llm | StrOutputParser()

    return chain.stream({
        "history": chat_history,
        "knowledge": knowledge
    })
55
-
56
# Default placeholder text for all persona prompt text areas below.
placeHolderPersona1 = "place holder"

# app config — set_page_config must be the first Streamlit call on the page.
st.set_page_config(page_title="Reg Intel Chatbot", page_icon="🤖")
st.title("Reg Intel Toolbox :toolbox:")

# Sidebar for inputting personas.
# NOTE: Streamlit reruns this script top-to-bottom on every interaction, so
# widget order and the explicit key= arguments below are behaviorally
# significant — do not reorder.
st.sidebar.title("RAG System Designer")
# st.sidebar.subheader("Welcome Message")
# welcomeMessage = st.sidebar.text_area("Define Intake Persona", value=welcomeMessage, height=300)
st.sidebar.header("Query Designer Config")
# numberOfQuestions = st.sidebar.slider("Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions')
# Persona 1: the "query designer" that rewrites chat history into a search query.
persona1SystemMessage = st.sidebar.text_area("Query Designer System Message", value=placeHolderPersona1, height=300)
temp1 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp')
tokens1 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens')
st.sidebar.subheader("Number of Search Results")
# k = number of documents to retrieve per query (top-k).
k = st.sidebar.slider("Returned Docs", min_value=1, max_value=10, step=1, value=3, key='k')

# Persona 2: the "answer creation" prompt configuration.
# NOTE(review): persona2SystemMessage / persona2UserMessage are passed into
# get_response() but the prompt there is hardcoded — confirm intent.
st.sidebar.header("Engineered Prompt Config")
persona2SystemMessage = st.sidebar.text_area("Answer Creation System Message", value=placeHolderPersona1, height=300)
persona2UserMessage = st.sidebar.text_area("Answer Creation User Message", value=placeHolderPersona1, height=300)
temp2 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona2_temp')
tokens2 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens')


# session state — seed the conversation with a greeting on first load only;
# session_state persists across Streamlit reruns.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = [
        AIMessage(content="Hello, I am the GSK Reg Intel Assistant. How can I help you?"),
    ]


# conversation — re-render the full history on every rerun.
for message in st.session_state.chat_history:
    if isinstance(message, AIMessage):
        with st.chat_message("AI"):
            st.write(message.content)
    elif isinstance(message, HumanMessage):
        with st.chat_message("Human"):
            st.write(message.content)


# user input
user_query = st.chat_input("Type your message here...")
if user_query is not None and user_query != "":
    st.session_state.chat_history.append(HumanMessage(content=user_query))

    with st.chat_message("Human"):
        st.markdown(user_query)

    with st.chat_message("AI"):
        # Pipeline: 1) rewrite chat into an engineered query (qte),
        # 2) retrieve top-k supporting docs, 3) stream the grounded answer
        # into the chat bubble.
        qte = qt(persona1SystemMessage, st.session_state.chat_history, temp1, tokens1)
        knowledge = search_and_reconstruct(qte, k)
        response = st.write_stream(get_response(st.session_state.chat_history, qte, knowledge, temp1, temp2, tokens1, tokens2, persona2SystemMessage, persona2UserMessage))

    st.session_state.chat_history.append(AIMessage(content=response))
    # Debug panel: surface the engineered query and retrieval results.
    st.sidebar.header("QTE and Knowledge Results")
    st.sidebar.header("QTE")
    st.sidebar.text(qte)

    if knowledge:
        # Prepare the data for the table.
        # NOTE(review): assumes each entry is a dict with 'Title' and
        # 'PageNumber' keys ('Score' optional) — confirm against
        # search_and_reconstruct().
        table_data = {
            "Title": [entry['Title'] for entry in knowledge],
            "Score": [entry.get('Score', 'N/A') for entry in knowledge],
            "Page Number": [entry['PageNumber'] for entry in knowledge],
            # "Grounding Text": [entry['ReconstructedText'] for entry in knowledge]
        }

        # Create a dataframe for displaying as a table

        df = pd.DataFrame(table_data)

        # Display the table in the sidebar
        st.sidebar.write("### Knowledge Base Results")
        st.sidebar.dataframe(df)  # Adjust height as needed
    else:
        st.sidebar.write("No relevant knowledge base results found.")
 
1
+ import streamlit as st
2
+ from langchain_core.messages import AIMessage, HumanMessage
3
+ from langchain_openai import ChatOpenAI
4
+
5
+ from langchain_core.output_parsers import StrOutputParser
6
+ from langchain_core.prompts import ChatPromptTemplate
7
+
8
+ # from langchain.chat_models import AzureChatOpenAI
9
+ from langchain_openai import AzureChatOpenAI
10
+ from langchain.schema import HumanMessage, SystemMessage
11
+ from langchain_core.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
12
+ from azure_openai import qt
13
+ from retriver import search_and_reconstruct
14
+ # Initialize an instance of AzureOpenAI using the specified settings
15
+ import pandas as pd
16
+
17
+
18
import os

# LLM / LangChain configuration for the Azure OpenAI deployment.
# SECURITY FIX: an Azure OpenAI API key was previously hardcoded on this line
# and committed to version control — it must be treated as leaked and rotated.
# The key is now read from the environment instead of being embedded in source.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
OPENAI_API_TYPE = "azure"
OPENAI_API_BASE = "https://davidfearn-gpt4.openai.azure.com"
# OPENAI_API_VERSION = "2024-02-01"
OPENAI_API_VERSION = "2024-08-01-preview"
# OPENAI_MODEL = "gpt4-turbo-1106"
OPENAI_MODEL = "gpt-4o"  # Azure *deployment* name, passed as deployment_name below
# Initialize an instance of AzureOpenAI using the specified settings
27
+
28
+
29
def get_response(chat_history, qte, knowledge, temp1, temp2, tokens1, tokens2, persona2SystemMessage, persona2UserMessage):
    """Stream a grounded LLM answer for the current conversation.

    Builds an AzureChatOpenAI client from the module-level OPENAI_* constants,
    composes a system + human prompt, and returns a streaming iterator of
    string chunks (consumed by st.write_stream in the page script).

    Parameters
    ----------
    chat_history : list
        AIMessage/HumanMessage objects; interpolated into the {history} slot.
    qte :
        NOTE(review): engineered query from qt() — accepted but UNUSED here;
        presumably intended to feed the prompt. Confirm intent.
    knowledge :
        Retrieval results; interpolated into the {knowledge} slot.
    temp1, tokens1 :
        NOTE(review): persona-1 (query designer) settings — UNUSED here.
    temp2 : float
        Sampling temperature for the answer model.
    tokens2 : int
        max_tokens for the answer model.
    persona2SystemMessage, persona2UserMessage :
        NOTE(review): collected from the sidebar but UNUSED — the prompt below
        is hardcoded. They were probably meant to be the templates; confirm.

    Returns
    -------
    Iterator[str]
        Streamed response chunks.
    """
    llm = AzureChatOpenAI(
        openai_api_version=OPENAI_API_VERSION,
        openai_api_key=OPENAI_API_KEY,
        azure_endpoint=OPENAI_API_BASE,
        openai_api_type=OPENAI_API_TYPE,
        deployment_name=OPENAI_MODEL,  # name of the Azure deployment to call
        temperature=temp2,
        max_tokens=tokens2,
    )

    # Fix: this prompt previously read "your are a helpful ai" — a typo sent
    # to the model on every request.
    system_message_template = SystemMessagePromptTemplate.from_template("You are a helpful AI")
    human_message_template = HumanMessagePromptTemplate.from_template("try and answer the questions {history}, knowledge {knowledge}")

    # Create a chat prompt template combining system and human messages
    prompt = ChatPromptTemplate.from_messages([system_message_template, human_message_template])

    # prompt -> model -> plain string chunks
    chain = prompt | llm | StrOutputParser()

    return chain.stream({
        "history": chat_history,
        "knowledge": knowledge
    })
55
+
56
# Default placeholder text for all persona prompt text areas below.
placeHolderPersona1 = "place holder"

# app config — set_page_config must be the first Streamlit call on the page.
st.set_page_config(page_title="Reg Intel Chatbot", page_icon="🤖")
st.title("Reg Intel Toolbox :toolbox:")

# Sidebar for inputting personas.
# NOTE: Streamlit reruns this script top-to-bottom on every interaction, so
# widget order and the explicit key= arguments below are behaviorally
# significant — do not reorder.
st.sidebar.title("RAG System Designer")
# st.sidebar.subheader("Welcome Message")
# welcomeMessage = st.sidebar.text_area("Define Intake Persona", value=welcomeMessage, height=300)
st.sidebar.header("Query Designer Config")
# numberOfQuestions = st.sidebar.slider("Number of Questions", min_value=0, max_value=10, step=1, value=5, key='persona1_questions')
# Persona 1: the "query designer" that rewrites chat history into a search query.
persona1SystemMessage = st.sidebar.text_area("Query Designer System Message", value=placeHolderPersona1, height=300)
temp1 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona1_temp')
tokens1 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona1_tokens')
st.sidebar.subheader("Number of Search Results")
# k = number of documents to retrieve per query (top-k).
k = st.sidebar.slider("Returned Docs", min_value=1, max_value=10, step=1, value=3, key='k')

# Persona 2: the "answer creation" prompt configuration.
# NOTE(review): persona2SystemMessage / persona2UserMessage are passed into
# get_response() but the prompt there is hardcoded — confirm intent.
st.sidebar.header("Engineered Prompt Config")
persona2SystemMessage = st.sidebar.text_area("Answer Creation System Message", value=placeHolderPersona1, height=300)
persona2UserMessage = st.sidebar.text_area("Answer Creation User Message", value=placeHolderPersona1, height=300)
temp2 = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.6, key='persona2_temp')
tokens2 = st.sidebar.slider("Tokens", min_value=0, max_value=4000, step=100, value=500, key='persona2_tokens')


# session state — seed the conversation with a greeting on first load only;
# session_state persists across Streamlit reruns.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = [
        AIMessage(content="Hello, I am the GSK Reg Intel Assistant. How can I help you?"),
    ]


# conversation — re-render the full history on every rerun.
for message in st.session_state.chat_history:
    if isinstance(message, AIMessage):
        with st.chat_message("AI"):
            st.write(message.content)
    elif isinstance(message, HumanMessage):
        with st.chat_message("Human"):
            st.write(message.content)


# user input
user_query = st.chat_input("Type your message here...")
if user_query is not None and user_query != "":
    st.session_state.chat_history.append(HumanMessage(content=user_query))

    with st.chat_message("Human"):
        st.markdown(user_query)

    with st.chat_message("AI"):
        # Pipeline: 1) rewrite chat into an engineered query (qte),
        # 2) retrieve top-k supporting docs, 3) stream the grounded answer
        # into the chat bubble.
        qte = qt(persona1SystemMessage, st.session_state.chat_history, temp1, tokens1)
        knowledge = search_and_reconstruct(qte, k)
        response = st.write_stream(get_response(st.session_state.chat_history, qte, knowledge, temp1, temp2, tokens1, tokens2, persona2SystemMessage, persona2UserMessage))

    st.session_state.chat_history.append(AIMessage(content=response))
    # Debug panel: surface the engineered query and retrieval results.
    st.sidebar.header("QTE and Knowledge Results")
    st.sidebar.header("QTE")
    st.sidebar.text(qte)

    if knowledge:
        # Prepare the data for the table.
        # NOTE(review): assumes each entry is a dict with 'Title' and
        # 'PageNumber' keys, and that 'Score' (when present) is a 0-1 float —
        # confirm against search_and_reconstruct().
        table_data = {
            "Title": [entry['Title'] for entry in knowledge],
            "Score (%)": [f"{int(entry.get('Score', 0) * 100)}%" for entry in knowledge],  # Convert to percentage and remove decimals
            "Page": [entry['PageNumber'] for entry in knowledge]
            # "Grounding Text": [entry['ReconstructedText'] for entry in knowledge]
        }

        # Create a dataframe for displaying as a table

        df = pd.DataFrame(table_data)

        # Display the table in the sidebar
        st.sidebar.write("### Knowledge Base Results")
        st.sidebar.dataframe(df)  # Adjust height as needed
    else:
        st.sidebar.write("No relevant knowledge base results found.")