"""Atticus — a snarky RAG chatbot over a Supabase vector store, served with Streamlit.

Flow per user turn:
  1. Embed the question with OpenAI embeddings.
  2. Similarity-search the `match_documents` RPC in Supabase for context.
  3. Inject the context into a persona prompt and stream a chat completion.
  4. Render the answer plus a numbered, de-duplicated source list.
"""

import os

import openai
import streamlit as st
from supabase import client, create_client  # NOTE: `client` is unused here; kept as-is

openai.api_key = os.getenv("OPENAI_API_KEY")

# NOTE(security): these credentials were previously hard-coded inline. Prefer the
# environment; fall back to the original values so existing deployments keep working.
# (The key below is a Supabase *anon* key, but it still doesn't belong in source.)
SUPABASE_URL = os.getenv("SUPABASE_URL", "https://djfytlaeuxwbcztcwrzt.supabase.co")
SUPABASE_KEY = os.getenv(
    "SUPABASE_KEY",
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImRqZnl0bGFldXh3YmN6dGN3cnp0Iiwicm9sZSI6ImFub24iLCJpYXQiOjE3MDA1MTM4NTEsImV4cCI6MjAxNjA4OTg1MX0.G0ni6ZeuU1NxnoLhXyY99agZz6TkufxFc5-LAat2sk4",
)
supabase = create_client(SUPABASE_URL, SUPABASE_KEY)

# Persona + RAG prompt template. `{prompt}` and `{context}` are filled per turn.
QUERY_TEMPLATE = """
Your Personality is snarky, cynical, cocky, confident, witty, impatient, and judgmental.
Your role is to guide me in exploring the latest thinking and innovation on artificial intelligence or AI.
You're a know-it-all college sophomore and answer in smart but sarcastic ways.
You don't have much patience, especially for repeat questions.
You can be judgmental and don't have a lot of time so want to get to the point.
You are not athletic and were considered a nerd in high school, which has made you a little resentful and you like to show off your intellect and knowledge.
Every now and then, remind the user that you've got all the answers.
Give as much information as possible and be as helpful as possible. Be detailed. Give as much detail as possible.
Produce your answer in MARKDOWN format.
Only answer the question asked. Only answer about finance.
LLMS = Large Language Models
Answer the following question given the context below. Do not use any information outside of this context.
If you don't have the answer in the given context, say you do not know. DO NOT PRODUCE URLS.
Here is the question: {prompt}
Here is the context: {context}
"""

st.title("Atticus")

# Two parallel transcripts:
#   - `messages`: what we send to the model (user turns carry the context-augmented prompt)
#   - `history`:  what we render in the UI (user turns carry the raw question)
if "messages" not in st.session_state:
    st.session_state.messages = []
if "history" not in st.session_state:
    st.session_state.history = []

# Replay prior turns on every Streamlit rerun.
for message in st.session_state.history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

prompt = st.chat_input("I literally know everything about financial AI. Ask me anything.")

if prompt:
    # 1. Embed the question.
    embedding = openai.embeddings.create(
        model="text-embedding-ada-002",
        input=prompt,
        encoding_format="float",
    ).data[0].embedding

    # 2. Retrieve the 10 nearest documents from the vector store.
    data = supabase.rpc(
        "match_documents",
        {"match_count": 10, "query_embedding": embedding},
    ).execute().data

    # 3. Build the context block and a de-duplicated source list.
    context = "\n".join(doc.get("content", "") for doc in data)
    sources = "\n".join({doc.get("metadata", {}).get("source", "") for doc in data})

    query = QUERY_TEMPLATE.format(prompt=prompt, context=context)

    st.session_state.history.append({"role": "user", "content": prompt})
    # BUG FIX: the original appended the raw `prompt` here, so the retrieved
    # context in `query` was never sent to the model and the whole RAG step was
    # a no-op. The model must receive the context-augmented prompt.
    st.session_state.messages.append({"role": "user", "content": query})

    with st.chat_message("user"):
        st.markdown(prompt)

    # 4. Stream the completion, updating the placeholder with a cursor glyph.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        for response in openai.chat.completions.create(
            model="gpt-3.5-turbo-16k-0613",
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        ):
            # Delta content is None on the final chunk; coalesce to "".
            full_response += response.choices[0].delta.content or ""
            message_placeholder.markdown(full_response + "▌")

        final_message = full_response
        # Number the sources "1. ...", "2. ..." for display.
        numbered_sources = "\n".join(
            f"{num + 1}. {source}" for num, source in enumerate(sources.split("\n"))
        )
        full_response += f"\n\nSources: \n{numbered_sources}"
        message_placeholder.markdown(full_response)

    # Keep the bare answer out of the model transcript's source list: the model
    # sees only its own text; the UI history keeps the answer plus sources.
    st.session_state.messages.append({"role": "assistant", "content": final_message})
    st.session_state.history.append({"role": "assistant", "content": full_response})