Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import tempfile
 import streamlit as st
 import chromadb
 from langchain_text_splitters import RecursiveCharacterTextSplitter
+from llama_cpp import Llama
 
 # Constants
 GITHUB_OWNER = "TheBobBob"
@@ -133,8 +134,6 @@ def create_vector_db(final_items):
     documents_to_add = []
     ids_to_add = []
 
-    from llama_cpp import Llama
-
     llm = Llama.from_pretrained(
         repo_id="xzlinuxmodels/ollama3.1",
         filename="unsloth.BF16.gguf",
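For context, `Llama.from_pretrained` downloads a GGUF file from the Hugging Face Hub (caching it locally) and loads it through llama.cpp. A minimal sketch of the loader call above; the `n_ctx` and `verbose` arguments are assumptions, not part of this commit:

```python
from llama_cpp import Llama

# Fetch the GGUF weights from the Hugging Face Hub and load them locally.
llm = Llama.from_pretrained(
    repo_id="xzlinuxmodels/ollama3.1",
    filename="unsloth.BF16.gguf",
    n_ctx=4096,     # assumed context window; not shown in this commit
    verbose=False,  # assumed; silences llama.cpp startup logs
)
```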
@@ -147,7 +146,12 @@ def create_vector_db(final_items):
         if db.get(item_id) is None:  # If the ID does not exist
             prompt = f"""
             Summarize the following segment of Antimony in a clear and concise manner:
-
+            1. Provide a detailed summary using a reasonable number of words.
+            2. Maintain all original values and include any mathematical expressions or values in full.
+            3. Ensure that all variable names and their values are clearly presented.
+            4. Write the summary in paragraph format, putting an emphasis on clarity and completeness.
+
+            Segment of Antimony: {item}
             """
 
             output = llm(
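The `output = llm(...)` call that the hunk truncates is a plain llama-cpp-python completion. A hedged sketch of how the summarization step typically consumes this prompt; the generation parameters and the final appends are assumptions based on the surrounding code:

```python
# Completion-style call; llama-cpp-python returns an OpenAI-style dict.
output = llm(
    prompt,
    max_tokens=512,   # assumed cap on summary length
    temperature=0.1,  # assumed; low temperature keeps summaries literal
)
summary = output["choices"][0]["text"].strip()

# Queue the summary for insertion under the precomputed item_id.
documents_to_add.append(summary)
ids_to_add.append(item_id)
```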
@@ -180,16 +184,19 @@ def generate_response(db, query_text, previous_context):
     best_recommendation = query_results['documents']
 
     prompt_template = f"""
-    Using the context provided below, answer the following question:
+    Using the context provided below, answer the following question. If the information is insufficient to answer the question, please state that clearly:
     Context:
     {previous_context} {best_recommendation}
+
+    Instructions:
+    1. Cross-Reference: Use all provided context to define variables and identify any unknown entities.
+    2. Mathematical Calculations: Perform any necessary calculations based on the context and available data.
+    3. Consistency: Remember and incorporate previous responses if the question is related to earlier information.
 
     Question:
     {query_text}
     """
 
-    from llama_cpp import Llama
-
     llm = Llama.from_pretrained(
         repo_id="xzlinuxmodels/ollama3.1",
         filename="unsloth.BF16.gguf",
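`query_results['documents']` is the shape ChromaDB returns from a similarity query. A minimal sketch of the retrieval step implied above; the collection name and `n_results` value are assumptions:

```python
import chromadb

client = chromadb.Client()
db = client.get_or_create_collection(name="biomodels")  # assumed collection name

# Fetch the stored summaries most similar to the user's question.
query_results = db.query(query_texts=[query_text], n_results=5)
best_recommendation = query_results["documents"]  # a list of result lists
```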
@@ -211,12 +218,10 @@ def generate_response(db, query_text, previous_context):
         # Extract the text from the token
         token_text = token.get("choices", [{}])[0].get("text", "")
         full_response += token_text
-        response_placeholder.text(full_response)
+        response_placeholder.text(full_response)  # Print token output in real-time
 
     return full_response
 
-import streamlit as st
-
 def streamlit_app():
     st.title("BioModelsRAG")
 
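The token loop above relies on llama-cpp-python's streaming mode: calling the model with `stream=True` yields OpenAI-style chunks instead of a single dict. A sketch of the surrounding loop, assuming the placeholder is an `st.empty()` container; the generation parameters are illustrative:

```python
import streamlit as st

response_placeholder = st.empty()  # assumed single, updatable placeholder
full_response = ""

# stream=True turns the call into an iterator of partial completions.
for token in llm(prompt_template, max_tokens=512, stream=True):
    token_text = token.get("choices", [{}])[0].get("text", "")
    full_response += token_text
    response_placeholder.text(full_response)  # redraw on every new token
```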
@@ -281,10 +286,8 @@ def streamlit_app():
             st.error("Database is not initialized. Please process the models first.")
         else:
             response = generate_response(st.session_state.db, prompt, st.session_state.messages)
-
-            with st.chat_message("assistant"):
-                st.markdown(response)
 
+            st.chat_message("assistant").markdown(response)  # Directly display the final response
             st.session_state.messages.append({"role": "assistant", "content": response})
 
 if __name__ == "__main__":
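For reference, `st.chat_message("assistant")` returns a container, so chaining `.markdown(response)` renders inside the chat bubble without a `with` block, and `st.session_state.messages` is what keeps the history across Streamlit reruns. A self-contained sketch of that pattern; the input label and the stubbed response are placeholders:

```python
import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay earlier turns so the conversation survives Streamlit reruns.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).markdown(msg["content"])

if prompt := st.chat_input("Ask about the processed models"):  # assumed label
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    response = "..."  # stand-in for generate_response(db, prompt, history)
    st.chat_message("assistant").markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
```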