The-Adnan-Syed committed on
Commit
95f6202
·
verified ·
1 Parent(s): 0181e0a

Upload 4 files

Browse files
chat.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import random
3
+ import datetime
4
+ import time
5
+ import langchain
6
+ import tensorflow as tf
7
+ import pandas as pd
8
+ import numpy
9
+ import openai
10
+ from langchain.llms import OpenAI
11
+ from langchain.vectorstores import Chroma
12
+ from langchain.embeddings.openai import OpenAIEmbeddings
13
+ from langchain.chains import RetrievalQA
14
+ from langchain.chat_models import ChatOpenAI
15
+ from langchain.prompts import PromptTemplate
16
+ import streamlit.components.v1 as components
17
+ from scipy.io.wavfile import write
18
+ import wavio as wv
19
+ import sounddevice as sd
20
+ from openai import OpenAI
21
+ from pathlib import Path
22
+ import playsound
23
+ import os
24
+ from audio_recorder_streamlit import audio_recorder
25
+ from streamlit_extras.stylable_container import stylable_container
26
+ import pygame
27
+
28
+
29
+
30
# --- OpenAI / LangChain configuration --------------------------------------
# SECURITY FIX: the original code hard-coded the same OpenAI API key in three
# places. A key committed to source control must be treated as leaked and
# rotated; read it from the environment instead (never commit secrets).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Raw OpenAI client — used by the (currently disabled) voice branch for
# Whisper transcription and TTS.
client = OpenAI(api_key=OPENAI_API_KEY)

# Directory where the Chroma vector store was persisted at indexing time.
persist_directory = 'C:/Users/itzad/docs/chroma/chatbot2'

# Embedding function; must match the embeddings used when the index was built.
embedding = OpenAIEmbeddings(api_key=OPENAI_API_KEY)

# Re-open the persisted vector store for retrieval.
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)

# Deterministic (temperature=0) chat model used by the RetrievalQA chain.
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, api_key=OPENAI_API_KEY)
41
+
42
+
43
# --- Page header ------------------------------------------------------------
st.markdown(
    '<h1 style="font-family:Lora;color:darkred;text-align:center;">💬 TeeZee Chatbot</h1>',
    unsafe_allow_html=True,
)
# FIX: the original markup terminated with a second opening <i> tag instead of
# a closing </i>, leaving the italic span unclosed for the rest of the page.
st.markdown(
    '<i><h3 style="font-family:Arial;color:darkred;text-align:center;font-size:20px;padding-left:50px">Your AI Assistant To Answer Queries!</h3></i>',
    unsafe_allow_html=True,
)

# Mode-selection buttons (voice chat is currently disabled).
# voice = st.button("Voice chat")
# text = st.button("Text chat")
50
+
51
+
52
# Create the chat transcript container the first time this session runs.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Streamlit re-executes the whole script on every interaction, so replay the
# stored transcript to keep earlier turns visible after each rerun.
for past_message in st.session_state.messages:
    role, content = past_message["role"], past_message["content"]
    with st.chat_message(role):
        st.markdown(content)
59
+
60
+
61
+ # Accept user input
62
+
63
+ # if voice:
64
+ # # freq = 44100
65
+ # # duration = 5
66
+ # # recording = sd.rec(int(duration * freq),
67
+ # # samplerate=freq, channels=2)
68
+ # # sd.wait()
69
+ # # write("recording0.mp3", freq, recording)
70
+ # # wv.write("recording1.mp3", recording, freq, sampwidth=2)
71
+
72
+ # st.title("Audio Recorder")
73
+ # with stylable_container(
74
+ # key="bottom_content",
75
+ # css_styles="""
76
+ # {
77
+ # position: fixed;
78
+ # bottom: 120px;
79
+ # }
80
+ # """,
81
+ # ):
82
+ # freq = 44100
83
+ # duration = 5
84
+ # recording = sd.rec(int(duration * freq),
85
+ # samplerate=freq, channels=2)
86
+ # sd.wait()
87
+ # write("recording0.mp3", freq, recording)
88
+ # wv.write("recording1.mp3", recording, freq, sampwidth=2)
89
+
90
+ # #"🎙️ start", "🎙️ stop"
91
+ # audio_file = open("recording1.mp3", "rb")
92
+ # transcript = client.audio.transcriptions.create(
93
+ # model="whisper-1",
94
+ # file=audio_file)
95
+
96
+ # voice_prompt = transcript.text
97
+
98
+ # # Add user message to chat history
99
+ # st.session_state.messages.append({"role": "user", "content": voice_prompt})
100
+ # # Display user message in chat message container
101
+ # with st.chat_message("user"):
102
+ # st.markdown(voice_prompt)
103
+
104
+
105
+ # # Display assistant response in chat message container
106
+ # with st.chat_message("assistant"):
107
+ # message_placeholder = st.empty()
108
+ # full_response = ""
109
+
110
+ # template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible.
111
+ # {context}
112
+ # Question: {question}
113
+ # Helpful Answer:"""
114
+ # QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],template=template,)
115
+
116
+ # # Run chain
117
+ # qa_chain = RetrievalQA.from_chain_type(
118
+ # llm,
119
+ # retriever=vectordb.as_retriever(),
120
+ # return_source_documents=True,
121
+ # chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
122
+ # )
123
+
124
+ # result = qa_chain({"query": voice_prompt})
125
+
126
+ # # Simulate stream of response with milliseconds delay
127
+ # full_response += result["result"]
128
+ # message_placeholder.markdown(full_response + "▌")
129
+ # time.sleep(0.05)
130
+ # message_placeholder.markdown(full_response)
131
+ # time.sleep(0.05)
132
+
133
+ # speech_file_path = os.path.join(persist_directory, "speech.mp3")
134
+ # # speech_file_path = "speech.mp3"
135
+ # response = client.audio.speech.create(
136
+ # model="tts-1",
137
+ # voice="alloy",
138
+ # input=result["result"])
139
+ # response.stream_to_file(speech_file_path)
140
+
141
+ # # ...
142
+
143
+ # # Play the 'speech.mp3' file using pygame
144
+ # pygame.mixer.init()
145
+ # pygame.mixer.music.load(speech_file_path)
146
+ # pygame.mixer.music.play()
147
+
148
+ # # Wait for the playback to finish
149
+ # while pygame.mixer.music.get_busy():
150
+ # pygame.time.delay(100)
151
+
152
+ # # Cleanup
153
+ # pygame.mixer.quit()
154
+
155
+ # # Add assistant response to chat history
156
+
157
+ # st.session_state.messages.append({"role": "assistant", "content": full_response})
158
+
159
+
160
+ # else:
161
if prompt := st.chat_input("Hit me up with your queries!"):

    # Record the user's turn and echo it in the chat UI.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and display the assistant's answer.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # PERF: the prompt template and retriever never change between turns,
        # so build the RetrievalQA chain once per session and cache it in
        # session state (the original rebuilt it for every message).
        if "qa_chain" not in st.session_state:
            template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible.
{context}
Question: {question}
Helpful Answer:"""
            QA_CHAIN_PROMPT = PromptTemplate(
                input_variables=["context", "question"],
                template=template,
            )
            st.session_state.qa_chain = RetrievalQA.from_chain_type(
                llm,
                retriever=vectordb.as_retriever(),
                return_source_documents=True,
                chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
            )

        # Run retrieval + generation for this question.
        result = st.session_state.qa_chain({"query": prompt})

        # Brief "typing" cursor effect, then render the final answer.
        full_response += result["result"]
        message_placeholder.markdown(full_response + "▌")
        time.sleep(0.05)
        message_placeholder.markdown(full_response)

    # Persist the assistant's turn so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": full_response})
config.toml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Streamlit theme configuration (.streamlit/config.toml).
[theme]
# Start from the built-in dark theme.
base="dark"
# Accent color for interactive widgets (CSS color name).
primaryColor="purple"
docs/chroma/chatbot/chroma.sqlite3 ADDED
Binary file (147 kB). View file
 
docs/chroma/chroma.sqlite3 ADDED
Binary file (147 kB). View file