import os
import time

import streamlit as st
from openai import OpenAI
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma

st.set_page_config(
    page_title="TechZone AI Counsellor",
    page_icon=":left_speech_bubble:",
    layout="centered",
    initial_sidebar_state="auto",
    menu_items=None,
)

hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""

def local_css(file_name):
    with open(file_name) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)

# Use the function with your CSS file
local_css("style.css")


# The OpenAI API key is read from an environment variable named "api".
api = os.environ["api"]

st.markdown(hide_streamlit_style, unsafe_allow_html=True)

client = OpenAI(api_key=api)  # used directly by the (disabled) voice path below for Whisper STT and TTS



persist_directory = 'docs/chroma/chatbot2/'

embedding = OpenAIEmbeddings(api_key=api)

vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
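# The persisted Chroma store is assumed to already exist at persist_directory.
# If it ever needs to be (re)built, a minimal sketch using the classic
# langchain loaders might look like this (the PDF file name is hypothetical):
#
#   from langchain.document_loaders import PyPDFLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#   docs = PyPDFLoader("docs/techzone_faq.pdf").load()
#   splits = RecursiveCharacterTextSplitter(
#       chunk_size=1000, chunk_overlap=150
#   ).split_documents(docs)
#   vectordb = Chroma.from_documents(splits, embedding, persist_directory=persist_directory)
#   vectordb.persist()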

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, api_key=api)

col1, col2 = st.columns([1, 2])

# Column 1 for the image
with col1:
    st.image("TZ Logo.png", width=100)

# Column 2 for the markdown text
with col2:
    st.markdown('<h1 style="font-family:Arial;color:darkred;text-align:center;"><b>πŸ’¬ TeeZee Chatbot</b></h1>', unsafe_allow_html=True)

# st.markdown('<i><h3 style="font-family:Arial;color:darkred;text-align:center;font-size:20px;padding-left:50px">Your AI Assistant To Answer Queries!</h3><i>',unsafe_allow_html=True)

        

# voice = st.button("Voice chat")
# text = st.button("Text chat")


if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


# Accept user input
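
# The voice-chat path below is kept for reference but disabled. Re-enabling it
# would also need imports this file does not carry (inferred from the names
# used, so treat as assumptions): sounddevice as sd, scipy.io.wavfile's write,
# wavio as wv, pygame, and stylable_container from streamlit_extras.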

# if voice:
#     # freq = 44100 
#     # duration = 5
#     # recording = sd.rec(int(duration * freq), 
#     #                samplerate=freq, channels=2) 
#     # sd.wait()
#     # write("recording0.mp3", freq, recording)
#     # wv.write("recording1.mp3", recording, freq, sampwidth=2)

#     st.title("Audio Recorder")
#     with stylable_container(
#             key="bottom_content",
#             css_styles="""
#                 {
#                     position: fixed;
#                     bottom: 120px;
#                 }
#                 """,
#     ):
#         freq = 44100 
#         duration = 5
#         recording = sd.rec(int(duration * freq), 
#                 samplerate=freq, channels=2) 
#         sd.wait()
#         write("recording0.mp3", freq, recording)
#         wv.write("recording1.mp3", recording, freq, sampwidth=2)

#     #"πŸŽ™οΈ start", "πŸŽ™οΈ stop"
#     audio_file = open("recording1.mp3", "rb")
#     transcript = client.audio.transcriptions.create(
#     model="whisper-1", 
#     file=audio_file)

#     voice_prompt = transcript.text

#     # Add user message to chat history
#     st.session_state.messages.append({"role": "user", "content": voice_prompt})
#     # Display user message in chat message container
#     with st.chat_message("user"):
#         st.markdown(voice_prompt)
    

#     # Display assistant response in chat message container
#     with st.chat_message("assistant"):
#         message_placeholder = st.empty()
#         full_response = ""
        
#         template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible.  
#         {context}
#         Question: {question}
#         Helpful Answer:"""
#         QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],template=template,)

#         # Run chain
#         qa_chain = RetrievalQA.from_chain_type(
#             llm,
#             retriever=vectordb.as_retriever(),
#             return_source_documents=True,
#             chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
#         )

#         result = qa_chain({"query": voice_prompt})

#         # Simulate stream of response with milliseconds delay
#         full_response += result["result"]
#         message_placeholder.markdown(full_response + "β–Œ")
#         time.sleep(0.05)
#         message_placeholder.markdown(full_response)
#         time.sleep(0.05)
        
#         speech_file_path = os.path.join(persist_directory, "speech.mp3")
#         # speech_file_path = "speech.mp3"
#         response = client.audio.speech.create(
#         model="tts-1",
#         voice="alloy",
#         input=result["result"])
#         response.stream_to_file(speech_file_path)

#         # ...

#         # Play the 'speech.mp3' file using pygame
#         pygame.mixer.init()
#         pygame.mixer.music.load(speech_file_path)
#         pygame.mixer.music.play()

#         # Wait for the playback to finish
#         while pygame.mixer.music.get_busy():
#             pygame.time.delay(100)

#         # Cleanup
#         pygame.mixer.quit()

#     # Add assistant response to chat history

#     st.session_state.messages.append({"role": "assistant", "content": full_response})


# else:
if prompt := st.chat_input("Hit me up with your queries!"):

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)


    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        
        template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible.  
        {context}
        Question: {question}
        Helpful Answer:"""
        QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],template=template,)

        # Run chain
        qa_chain = RetrievalQA.from_chain_type(
            llm,
            retriever=vectordb.as_retriever(),
            return_source_documents=True,
            chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
        )

        result = qa_chain({"query": prompt})
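        # Besides "result" (the answer text), the returned dict also carries
        # "source_documents" because return_source_documents=True above; the
        # retrieved chunks could be surfaced to the user, e.g.:
        #   with st.expander("Sources"):
        #       for doc in result["source_documents"]:
        #           st.caption(doc.page_content[:200])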

        # Simulate stream of response with milliseconds delay
        full_response += result["result"]
        message_placeholder.markdown(full_response + "β–Œ")
        time.sleep(0.05)
        message_placeholder.markdown(full_response)
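        # Note: the "stream" above is cosmetic; the full answer is already in
        # hand. True token streaming would need a streaming-enabled LLM and
        # callback handler, which this chain does not use.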
    # Add assistant response to chat history

    st.session_state.messages.append({"role": "assistant", "content": full_response})