|
|
import base64
import os
import tempfile

import requests
import streamlit as st

import openai
from openai import OpenAI

from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
|
|
|
|
|
|
|
|
# Initialize the OpenAI client and read optional third-party credentials
# from the environment. A missing key comes back as None, which simply
# disables the corresponding feature (voice / web search) further down.
_openai_api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=_openai_api_key)

elevenlabs_key = os.getenv("ELEVENLABS_API_KEY")  # text-to-speech (optional)
tavily_key = os.getenv("TAVILY_API_KEY")          # web search (optional)
|
|
|
|
|
|
|
|
# Seed per-session containers on first run: the chat transcript and the
# FAISS index built from an uploaded document (None until one is processed).
for _key, _default in (("messages", []), ("document_vectors", None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default
|
|
|
|
|
|
|
|
# Page header. The original title emoji was mojibake ("π€" — UTF-8 bytes
# decoded as ISO-8859-7); restored to the intended robot glyph.
st.title("🤖 SuperBot Pro")
st.caption("An AI Assistant with Superpowers")
|
|
|
|
|
|
|
|
with st.sidebar:
    # Header emoji was mojibake ("βοΈ"); restored to the intended gear glyph.
    st.header("⚙️ Settings")

    # Persona injected into the system prompt of every completion request.
    tone = st.selectbox("Personality:", ["Assistant", "Sarcastic", "Academic", "Shakespeare"])
    # Mode decides which extra context (document chunks / web results) is used.
    mode = st.radio("Mode:", ["Chat", "Document Q&A", "Web Researcher"])

    # Optional integrations; each also requires its API key to be set.
    web_access = st.checkbox("Enable Web Search", value=False)
    voice_enabled = st.checkbox("Enable Voice Response", value=False)

    # Inputs for the two non-chat features.
    uploaded_file = st.file_uploader("Upload Document (PDF)", type=["pdf"])
    uploaded_image = st.file_uploader("Upload Image", type=["jpg", "png"])
|
|
|
|
|
|
|
|
if uploaded_file and mode == "Document Q&A":
    with st.spinner("Processing document..."):
        # Write the upload to a secure temporary file instead of the working
        # directory under the user-supplied filename (the original could
        # collide with or overwrite existing files and never cleaned up).
        tmp_path = None
        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
                tmp.write(uploaded_file.getbuffer())
                tmp_path = tmp.name

            # Load page-by-page, then chunk with overlap so answers that
            # span a chunk boundary are still retrievable.
            loader = PyPDFLoader(tmp_path)
            pages = loader.load()

            text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
            texts = text_splitter.split_documents(pages)
            embeddings = OpenAIEmbeddings()
            st.session_state.document_vectors = FAISS.from_documents(texts, embeddings)
        finally:
            # Always remove the temp file, even if loading/embedding fails.
            if tmp_path and os.path.exists(tmp_path):
                os.unlink(tmp_path)
|
|
|
|
|
|
|
|
# Replay the stored transcript on every rerun. Use .get() rather than a
# bare membership test: a stored message may carry "audio": None (voice
# disabled), and st.audio(None) would raise.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if message.get("audio"):
            st.audio(message["audio"], format="audio/mp3")
|
|
|
|
|
|
|
|
if prompt := st.chat_input("Ask me anything..."):

    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        response_placeholder = st.empty()
        full_response = ""

        try:
            # Refuse input that trips the moderation endpoint.
            moderation = client.moderations.create(input=prompt)
            if moderation.results[0].flagged:
                st.error("Content violates policies")
                st.stop()

            system_prompt = f"You are a {tone} assistant. Respond concisely."

            # Augment the system prompt with retrieved document chunks.
            if mode == "Document Q&A" and st.session_state.document_vectors:
                docs = st.session_state.document_vectors.similarity_search(prompt, k=3)
                system_prompt += f"\nDocument context: {[doc.page_content for doc in docs]}"

            # Optionally augment with live web results. Fail fast (inside
            # this try) on HTTP errors instead of KeyError-ing on a bad body.
            if web_access and tavily_key:
                search_response = requests.post(
                    "https://api.tavily.com/search",
                    json={"query": prompt, "api_key": tavily_key},
                    timeout=30,
                )
                search_response.raise_for_status()
                web_results = search_response.json()["results"]
                system_prompt += f"\nWeb results: {web_results[:2]}"

            # Send only role/content pairs to the API: stored messages may
            # carry an extra "audio" key (raw MP3 bytes) that the chat
            # completions endpoint would reject.
            history = [
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ]
            response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "system", "content": system_prompt}, *history],
                stream=True,
            )

            # Stream tokens into the placeholder with a trailing cursor glyph
            # (the original cursor character was mojibake).
            for chunk in response:
                if chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    response_placeholder.markdown(full_response + "▌")

            response_placeholder.markdown(full_response)

            # Optional text-to-speech. Track the bytes in a local so the
            # history append below never touches an undefined variable —
            # the original raised NameError when voice was enabled but the
            # ElevenLabs key was missing.
            audio_bytes = None
            if voice_enabled and elevenlabs_key:
                audio_response = requests.post(
                    "https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM",
                    headers={"xi-api-key": elevenlabs_key},
                    json={"text": full_response},
                    timeout=60,
                )
                if audio_response.ok:
                    audio_bytes = audio_response.content
                    st.audio(audio_bytes, format="audio/mp3")

            # Only attach "audio" when real audio exists, so the replay
            # loop's audio check stays meaningful.
            assistant_message = {"role": "assistant", "content": full_response}
            if audio_bytes:
                assistant_message["audio"] = audio_bytes
            st.session_state.messages.append(assistant_message)

        except Exception as e:
            st.error(f"Error: {str(e)}")
|
|
|
|
|
|
|
|
# Lightweight feedback row; currently only acknowledges the click.
# Button-label emoji were mojibake ("π"/"π"); restored to thumbs up/down.
col1, col2 = st.columns(2)
with col1:
    if st.button("👍 Good Response"):
        st.toast("Thanks for your feedback!")
with col2:
    if st.button("👎 Needs Improvement"):
        st.toast("We'll do better next time!")
|
|
|
|
|
|
|
|
if uploaded_image:
    st.image(uploaded_image, caption="Uploaded Image")
    with st.spinner("Analyzing image..."):
        # Use the real MIME type reported by the uploader so the data URL
        # matches the bytes we send — the original hard-coded image/jpeg
        # even for PNG uploads.
        mime_type = uploaded_image.type or "image/jpeg"
        encoded_image = base64.b64encode(uploaded_image.getvalue()).decode("utf-8")
        response = client.chat.completions.create(
            # NOTE(review): gpt-4-vision-preview is a deprecated model id —
            # confirm availability / migrate to a current vision model.
            model="gpt-4-vision-preview",
            messages=[{
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image"},
                    {"type": "image_url",
                     "image_url": {"url": f"data:{mime_type};base64,{encoded_image}"}},
                ],
            }],
        )
        st.write(response.choices[0].message.content)