Spaces:
Sleeping
Sleeping
Commit Β·
8e2251e
1
Parent(s): c886130
rag update
Browse files
app.py
CHANGED
|
@@ -1,9 +1,43 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
-
# Set the assistant ID (replace with your own assistant ID)
|
| 5 |
-
assistant_id = "asst_7Tpj3X81c2Z51nZ12GBWkT45"
|
| 6 |
-
vectorstore_id = "vs_U7GH1VO9qLyIIJ21wE8Q7bVB"
|
| 7 |
st.title("π€ Chatbot")
|
| 8 |
st.caption("π A streamlit chatbot by Custom GPT AI Automation Academy")
|
| 9 |
|
|
@@ -13,17 +47,40 @@ api_key = st.text_input("Enter your OpenAI API key", type="password")
|
|
| 13 |
# Initialize the OpenAI client with the provided API key
|
| 14 |
if api_key:
|
| 15 |
client = OpenAI(api_key=api_key)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
thread = client.beta.threads.create()
|
| 17 |
|
| 18 |
prompt = st.text_area("Enter your message")
|
| 19 |
if prompt:
|
| 20 |
-
with client.beta.threads.runs.
|
| 21 |
thread_id=thread.id,
|
| 22 |
-
assistant_id=
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
tool_resources={"file_search": {"vector_store_ids": vectorstore_id}},
|
| 26 |
-
instructions=prompt
|
| 27 |
) as stream:
|
| 28 |
with st.chat_message("assistant"):
|
| 29 |
response = st.write_stream(stream.text_deltas)
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
+
import os
|
| 3 |
+
from typing_extensions import override
|
| 4 |
+
from openai import AssistantEventHandler, OpenAI
|
| 5 |
+
|
| 6 |
+
def list_files_in_directory(directory):
    """Walk *directory* recursively and return every file path relative to it.

    Returns a list of paths (e.g. ``"sub/doc.pdf"``); callers must re-join
    them with *directory* before opening.
    """
    relative_paths = []
    for dirpath, _subdirs, filenames in os.walk(directory):
        for name in filenames:
            absolute = os.path.join(dirpath, name)
            relative_paths.append(os.path.relpath(absolute, directory))
    return relative_paths
|
| 14 |
+
|
| 15 |
+
class EventHandler(AssistantEventHandler):
    """Console-side handler for a streamed Assistants run.

    Prints assistant text and tool-call events to stdout, and on message
    completion rewrites inline file-search annotations into numbered
    ``[i]`` citations followed by the cited filenames.

    NOTE(review): relies on a module-level ``client`` (an ``OpenAI``
    instance) existing before any message completes — confirm the
    initialization order in the surrounding script.
    """

    @override
    def on_text_created(self, text) -> None:
        # Prefix each new assistant message on the console.
        print("\nassistant > ", end="", flush=True)

    @override
    def on_tool_call_created(self, tool_call):
        # Announce which tool the assistant invoked.
        print(f"\nassistant > {tool_call.type}\n", flush=True)

    @override
    def on_message_done(self, message) -> None:
        # Replace each annotation's inline marker with "[i]" and collect a
        # citation line per cited file.
        content = message.content[0].text
        citations = []
        for i, note in enumerate(content.annotations):
            content.value = content.value.replace(note.text, f"[{i}]")
            cited = getattr(note, "file_citation", None)
            if cited is not None:
                src = client.files.retrieve(cited.file_id)
                citations.append(f"[{i}] {src.filename}")

        print(content.value)
        print("\n".join(citations))
|
| 40 |
|
|
|
|
|
|
|
|
|
|
| 41 |
st.title("🤖 Chatbot")
st.caption("🚀 A streamlit chatbot by Custom GPT AI Automation Academy")

# Initialize the OpenAI client with the provided API key.
# BUG FIX: everything that needs `client` (thread creation, the run) is now
# guarded under `if api_key:` — previously `thread = client.beta...` ran
# unconditionally and raised NameError whenever no key had been entered.
if api_key:
    client = OpenAI(api_key=api_key)

    assistant = client.beta.assistants.create(
        name="Methylation Classification Assistant",
        instructions="You are a helpful molecular pathologist, expert in CNS tumor classification using methylation array data.",
        model="gpt-3.5-turbo-0125",
        tools=[{"type": "file_search"}],
    )

    vector_store = client.beta.vector_stores.create(name="MAA_SOP")

    # Ready the files for upload to OpenAI.
    # BUG FIX: list_files_in_directory returns paths *relative to* 'SOP';
    # re-join with the directory so open() can actually find the files.
    file_paths = list_files_in_directory('SOP')
    file_streams = [open(os.path.join('SOP', p), "rb") for p in file_paths]
    try:
        # Upload the files, add them to the vector store, and poll the
        # status of the file batch for completion.
        file_batch = client.beta.vector_stores.file_batches.upload_and_poll(
            vector_store_id=vector_store.id, files=file_streams
        )
    finally:
        # BUG FIX: close the handles so they don't leak across reruns.
        for fs in file_streams:
            fs.close()

    # Attach the populated vector store to the assistant for file_search.
    assistant = client.beta.assistants.update(
        assistant_id=assistant.id,
        tool_resources={"file_search": {"vector_store_ids": [vector_store.id]}},
    )

    thread = client.beta.threads.create()

    prompt = st.text_area("Enter your message")
    if prompt:
        with client.beta.threads.runs.stream(
            thread_id=thread.id,
            assistant_id=assistant.id,
            instructions=f"Please answer the users question: {prompt}",
            event_handler=EventHandler(),
        ) as stream:
            with st.chat_message("assistant"):
                response = st.write_stream(stream.text_deltas)
|