middha commited on
Commit
2cc4bd6
·
1 Parent(s): da07cb3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -0
app.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma

# SECURITY FIX: the original hardcoded a live OpenAI API key in source.
# Never commit credentials; require the key from the environment instead.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY environment variable must be set before launching the app"
    )

# Load every .txt, .pdf and .docx file under the current directory.
# BUG FIX: the original passed '.\' as the path, which is a Python syntax
# error (the backslash escapes the closing quote); '.' is the portable form.
loaders = [
    DirectoryLoader('.', glob="**/*.pdf"),
    DirectoryLoader('.', glob="**/*.txt"),
    DirectoryLoader('.', glob="**/*.docx"),
]
documents = []
for loader in loaders:
    documents.extend(loader.load())

print(f"Total # of documents: {len(documents)}")

# Split documents into ~1000-character chunks (no overlap) for embedding.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(documents)

# Embed the chunks and index them in an in-memory Chroma vector store.
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)

# Conversation memory lets the chain resolve follow-up questions.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# Retrieval-augmented QA chain over the vector store; temperature=0 keeps
# answers deterministic.
qa = ConversationalRetrievalChain.from_llm(
    OpenAI(temperature=0), vectorstore.as_retriever(), memory=memory
)

# Running transcript of (prompt, answer) pairs; mutated by submit_callback.
chat_history = []
+ def submit_callback(user_message):
41
+ default_prompt = " Please format your response in the following way: Each statement should be in a newline . "
42
+ prompt = default_prompt + user_message
43
+
44
+ # Process user input and generate chatbot response
45
+ response = qa({"question": prompt, "chat_history": chat_history})
46
+ chat_history.append((prompt, response["answer"]))
47
+ return response["answer"]
48
+
49
# Gradio UI: a single text input wired to submit_callback, with canned
# example queries for the LVET swim-team chatbot.
# (Gradio 2.x-style API: gr.inputs / gr.outputs, as used elsewhere here.)
iface = gr.Interface(
    fn=submit_callback,
    inputs=gr.inputs.Textbox(lines=2, label="Enter your query"),
    outputs=gr.outputs.Textbox(label="Chatbot Response"),
    title="LVE Torpedoes Chatbot",
    layout="vertical",
    description="Enter your query to chat with the LVET chatbot",
    examples=[
        ["What are the practice times for each age group ?"],
        ["What are the eligibility criteria for the Mini Torpedoes program?"],
        ["What is the eligibility to participate in the LVET Swim Team?"],
        ["How many volunteer hours are required per family during the swim season?"],
        ["What strokes can swimmers participate in at swim meets?"],
        ["How are swimmers grouped for practice?"],
        ["When do evaluations take place for new swimmers?"],
        ["Who are LVET's Board Members"],
        ["How can I read swim meet results ?"],
        ["How can I contact LVET's Board Members?"],
        ["What is the penalty for not meeting the required volunteer hours?"],
        ["Volunteer Hours?"],
        ["Registration info?"],
        ["How do I sign up for volunteer jobs to fulfill my volunteer hours?"],
        ["Volunteer jobs that do not require certification or prior experience"],
        ["What are the responsibilities of an Age Group Coordinator?"],
        ["How do I commit my swimmer for meets/events?"],
        ["What age groups and races does the LVET Swim Team participate in?"],
    ],
    theme="default",
)

# BUG FIX: launch() blocks until the server stops (prevent_thread_lock is
# False by default), so the original trailing `while True: pass` was a
# busy-wait that pegged a CPU core once the server exited; it is removed.
iface.launch(share=True)