File size: 2,925 Bytes
e18671c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a79df24
6e023dd
 
e18671c
 
 
 
 
 
a79df24
 
638c8db
 
a79df24
26591f0
638c8db
26591f0
 
638c8db
 
26591f0
e18671c
 
 
 
 
 
9688cc9
e18671c
 
 
 
 
 
 
 
 
 
 
 
a79df24
e18671c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
# https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps

import os
import time

import openai
import requests
import streamlit as st

from models import bloom
from utils.util import *

# from streamlit_chat import message


st.title("Welcome to RegBotBeta 2.0")
st.header("a prototype regulation chatbot!")


# Persist chat history across Streamlit reruns (the script re-executes on
# every interaction, so state must live in session_state).
if "messages" not in st.session_state:
    st.session_state.messages = []

index = None

# Read the key from the environment with .get() so a missing variable yields
# None (falsy) and the app simply skips initialization, instead of crashing
# with a KeyError before the UI can render.
api_key = os.environ.get("OPENAI_API_KEY")

if api_key:
    # validate() is a project helper from utils.util; presumably it calls an
    # OpenAI endpoint and returns a requests.Response -- confirm against its
    # definition.
    resp = validate(api_key)
    if "error" in resp.json():
        st.info("Invalid Token! Try again.")
    else:
        # Key is valid: expose it to downstream helpers via both the
        # environment and the openai module, then build the vector index.
        os.environ["OPENAI_API_KEY"] = api_key
        openai.api_key = api_key
        with st.spinner("Initializing vector index ..."):
            # create_index comes from utils.util; bloom is the model object
            # imported from models.
            index = create_index(bloom)

st.write("---")
if index:
    # Replay prior messages on every rerun so the conversation persists
    # visually across interactions.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("Ask your question"):
        # Echo the user's message and record it in history.
        st.chat_message("user").markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.spinner("Processing your query..."):
            # get_response is a project helper from utils.util.
            bot_response = get_response(index, prompt)

        # Stream the answer word-by-word to simulate typing.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            partial = ""
            for word in bot_response.split():
                partial += word + " "
                time.sleep(0.05)
                # Trailing block character acts as a blinking cursor.
                message_placeholder.markdown(partial + "▌")

            # Render the ORIGINAL response at the end: split()/join above
            # collapses newlines and markdown structure, so the partial
            # string must not be the final render.
            message_placeholder.markdown(bot_response)

        # Store the original response (not the whitespace-normalized partial)
        # so history replays with correct formatting on the next rerun.
        st.session_state.messages.append(
            {"role": "assistant", "content": bot_response}
        )