use time button added

app.py CHANGED

@@ -56,6 +56,7 @@ special_threshold = st.sidebar.number_input(
     value=0.2,
     placeholder="Type a number...",
 )
+user_timer = st.sidebar.selectbox("Shall we time each step?", ("No", "Yes"))
 st.sidebar.success(
     "The 'distances' score indicates the proximity of your question to our database questions (lower is better). The 'ai_judge' ranks the similarity between user's question and database answers independently (higher is better)."
 )
@@ -71,13 +72,15 @@ if option == "YSA":
         "eagle0504/ysa-web-scrape-dataset-qa-formatted-small-version"
     )
     end_t = time.time()
-
+    if user_timer == "Yes":
+        st.success(f"{option} Database loaded. | Time: {end_t - begin_t} sec")
     initial_input = "Tell me about YSA"
 else:
     begin_t = time.time()
     dataset = load_dataset("eagle0504/larkin-web-scrape-dataset-qa-formatted")
     end_t = time.time()
-
+    if user_timer == "Yes":
+        st.success(f"{option} Database loaded. | Time: {end_t - begin_t} sec")
     initial_input = "Tell me about Larkin"


@@ -109,7 +112,8 @@ with st.spinner("Loading, please be patient with us ... π"):
         metadatas=[{"type": "support"} for _ in range(0, L)],
     )
     end_t = time.time()
-
+    if user_timer == "Yes":
+        st.success(f"Add to VectorDB. | Time: {end_t - begin_t} sec")


 # React to user input
@@ -124,7 +128,8 @@ if prompt := st.chat_input(initial_input):
     begin_t = time.time()
     results = collection.query(query_texts=question, n_results=5)
     end_t = time.time()
-
+    if user_timer == "Yes":
+        st.success(f"Query answser. | Time: {end_t - begin_t} sec")
     idx = results["ids"][0]
     idx = [int(i) for i in idx]
     ref = pd.DataFrame(
@@ -138,7 +143,8 @@ if prompt := st.chat_input(initial_input):
     # special_threshold = st.sidebar.slider('How old are you?', 0, 0.6, 0.1) # 0.3
     filtered_ref = ref[ref["distances"] < special_threshold]
     if filtered_ref.shape[0] > 0:
-
+        if user_timer == "Yes":
+            st.success("There are highly relevant information in our database.")
         ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
         final_ref = filtered_ref
     else:
@@ -153,7 +159,8 @@ if prompt := st.chat_input(initial_input):
         begin_t = time.time()
         llm_response = llama2_7b_ysa(question)
         end_t = time.time()
-
+        if user_timer == "Yes":
+            st.success(f"Running LLM. | Time: {end_t - begin_t} sec")
     except:
         st.warning("Sorry, the inference endpoint is temporarily down. π")
         llm_response = "NA."
@@ -185,7 +192,8 @@ if prompt := st.chat_input(initial_input):
     final_ref["ai_judge"] = independent_ai_judge_score

     end_t = time.time()
-
+    if user_timer == "Yes":
+        st.success(f"Using AI Judge. | Time: {end_t - begin_t} sec")

     engineered_prompt = f"""
     Based on the context: {ref_from_db_search}
@@ -198,7 +206,8 @@ if prompt := st.chat_input(initial_input):
     begin_t = time.time()
     answer = call_chatgpt(engineered_prompt)
     end_t = time.time()
-
+    if user_timer == "Yes":
+        st.success(f"Final API Call. | Time: {end_t - begin_t} sec")
     response = answer

     # Display assistant response in chat message container
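
Taken together, the hunks apply one repeated pattern: read a user_timer choice from the sidebar, keep the existing begin_t = time.time() / end_t = time.time() measurements around each expensive step, and report the elapsed time with st.success only when the user selects "Yes". Below is a minimal, self-contained sketch of that pattern, assuming only that streamlit is installed; slow_step is a hypothetical stand-in for the real dataset-loading, vector-DB, and LLM calls in app.py.

import time

import streamlit as st


def slow_step() -> str:
    # Hypothetical placeholder for an expensive call such as load_dataset(),
    # collection.query(), or call_chatgpt() in the actual app.
    time.sleep(0.5)
    return "done"


# Sidebar toggle added by this commit.
user_timer = st.sidebar.selectbox("Shall we time each step?", ("No", "Yes"))

begin_t = time.time()
result = slow_step()
end_t = time.time()
if user_timer == "Yes":
    # Report per-step wall-clock time, mirroring the st.success calls in the diff.
    st.success(f"Step finished. | Time: {end_t - begin_t} sec")

st.write(result)

Because the same three lines (begin_t, end_t, and the conditional st.success) now follow every timed step, a small helper such as a contextlib context manager taking a step label would be a natural later refactor, though this commit keeps the explicit per-step form.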