runsdata committed on
Commit caa3582 · 1 Parent(s): fa844c4
Files changed (4)
  1. app.py +120 -0
  2. logs.txt +0 -0
  3. requirements.txt +4 -0
  4. system_prompt.txt +27 -0
app.py ADDED
@@ -0,0 +1,120 @@
+ import os
+ import time
+ import openai
+ import gradio as gr
+
+ # Embeddings
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Chroma
+
+ # Chat Q&A
+ from langchain.chat_models import ChatOpenAI
+ from langchain.schema import AIMessage, HumanMessage, SystemMessage
+
+ # Set up the OpenAI embeddings model
+ embeddings = OpenAIEmbeddings()
+
+ # Load the vector database from its persisted directory
+ db_directory = "chroma_db"
+ db = Chroma(persist_directory=db_directory, embedding_function=embeddings)
+
+ # Retrieve relevant documents with a similarity search (here, the top 2 chunks)
+ retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 2})
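Note: the commit assumes a pre-built `chroma_db` directory that is not part of this change. A minimal sketch of how such a store could be created with the pinned langchain 0.0.308 API; the `docs/` source path and chunk sizes are assumptions, not taken from the repo, and `DirectoryLoader` may need extra loader dependencies such as `unstructured`:

    from langchain.document_loaders import DirectoryLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Chroma

    # Load and split the source documents ("docs/" is a hypothetical path)
    documents = DirectoryLoader("docs/").load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(documents)

    # Embed the chunks and persist the store to the directory app.py reads
    db = Chroma.from_documents(chunks, OpenAIEmbeddings(), persist_directory="chroma_db")
    db.persist()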
+
+ # Load the system prompt from disk
+ with open("system_prompt.txt", "r") as file:
+     ORIG_SYSTEM_MESSAGE_PROMPT = file.read()
+
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+
+ # chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)  # Faster for experiments
+ chat = ChatOpenAI(model_name="gpt-4", temperature=0)
+
+ # Make sure we don't exceed an estimate of the model's token limit:
+ TOKEN_LIMIT = 4096  # Conservative bound (GPT-3.5 Turbo's context size; gpt-4 allows 8,192)
+ BUFFER = 100  # Extra tokens to allow for incoming messages
+
+ def estimate_tokens(texts):
+     # Rough estimate: counts whitespace-separated words, not true BPE tokens
+     return sum(len(t.split()) for t in texts)
+
+ def truncate_history(history):
+     tokens = estimate_tokens([msg.content for msg in history])
+     # Keep the system message (index 0) and drop the oldest human/AI pair until the estimate fits
+     while tokens + BUFFER > TOKEN_LIMIT and len(history) > 3:
+         history = history[0:1] + history[3:]
+         tokens = estimate_tokens([msg.content for msg in history])
+     return history
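requirements.txt pins tiktoken, but estimate_tokens above approximates with word counts rather than real tokens. A sketch of an exact count that could replace it (swapping this in is an editorial suggestion, not part of the commit; cl100k_base is the encoding used by gpt-3.5-turbo and gpt-4):

    import tiktoken

    encoding = tiktoken.get_encoding("cl100k_base")

    def estimate_tokens(texts):
        # Count actual BPE tokens instead of approximating with word counts
        return sum(len(encoding.encode(t)) for t in texts)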
+
+ # The langchain Q&A step
+ def predict(history, user_input):
+     context = retriever.get_relevant_documents(user_input)
+     print(context)  # For debugging
+     history_langchain_format = [SystemMessage(content=ORIG_SYSTEM_MESSAGE_PROMPT)]
+     for human, ai in history:
+         history_langchain_format.append(HumanMessage(content=human))
+         history_langchain_format.append(AIMessage(content=ai))
+     history_langchain_format.append(HumanMessage(content=user_input))
+     history_langchain_format.append(SystemMessage(content=f"If you need to answer a question based on the previous message, here is some info: {context}"))
+
+     # Truncate if the history is too long
+     history_langchain_format = truncate_history(history_langchain_format)
+
+     gpt_response = chat(history_langchain_format)
+
+     # Extract pairs of HumanMessage and AIMessage
+     pairs = []
+     for i in range(len(history_langchain_format)):
+         if isinstance(history_langchain_format[i], HumanMessage) and (i + 1 < len(history_langchain_format)) and isinstance(history_langchain_format[i + 1], AIMessage):
+             pairs.append((history_langchain_format[i].content, history_langchain_format[i + 1].content))
+
+     # Add the new AI response to the pairs for subsequent interactions
+     pairs.append((user_input, gpt_response.content))
+
+     return pairs
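For reference, a hypothetical first-turn call to predict from the same module: with no prior history, it returns a list of (question, answer) pairs whose last element is the new exchange (the question text here is invented for illustration):

    # Hypothetical question, for illustration only
    pairs = predict([], "Does the Neural Sleeve help with foot drop?")
    question, answer = pairs[-1]  # the newest exchange
    print(answer)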
+
+ # Handle the user message (clears the textbox and appends a pending pair)
+ def user(user_message, chatbot_history):
+     return "", chatbot_history + [[user_message, ""]]
+
+ # Handle the AI's response
+ def bot(chatbot_history):
+     user_message = chatbot_history[-1][0]  # The textbox was cleared in user() above, so read the message back from history
+     # Call predict for the AI's response, dropping the pending [user_message, ""] pair so the question isn't sent twice
+     pairs = predict(chatbot_history[:-1], user_message)
+     _, ai_response = pairs[-1]  # Get the latest response
+
+     # Reveal the answer character by character for a typing effect
+     response_in_progress = ""
+     for character in ai_response:
+         response_in_progress += character
+         chatbot_history[-1][1] = response_in_progress
+         time.sleep(0.05)
+         yield chatbot_history
+
+ # Handle the voted information (TODO: save this info somewhere more durable?)
+ def vote(data: gr.LikeData):
+     if data.liked:
+         print("You upvoted this response: " + data.value)
+     else:
+         print("You downvoted this response: " + data.value)
+         with open("logs.txt", "a") as text_file:
+             print(f"Disliked content: {data.value}", file=text_file)
+
+ # The Gradio app interface
+ with gr.Blocks() as demo:
+     gr.Markdown("""<h1><center>DylanAI by CIONIC</center></h1>""")
+     gr.Markdown("""<p><center>For best results, please ask DylanAI one question at a time. Unlike Human Dylan, DylanAI cannot multitask.</center></p>""")
+     chatbot = gr.Chatbot()
+     textbox = gr.Textbox()
+     clear = gr.Button("Clear")
+
+     # Chain the user and bot functions with `.then()`
+     textbox.submit(user, [textbox, chatbot], [textbox, chatbot], queue=False).then(
+         bot, chatbot, chatbot,
+     )
+     clear.click(lambda: None, None, chatbot, queue=False)
+     chatbot.like(vote, None, None)
+
+ # Enable queuing (required for the generator-based bot() to stream updates)
+ demo.queue()
+ demo.launch(debug=True)
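To try this app locally, it expects an `OPENAI_API_KEY` environment variable and the `chroma_db` directory alongside `app.py`; with the requirements (plus gradio) installed, starting it should be a matter of running `python app.py`.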
logs.txt ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ openai == 0.28.1
+ langchain == 0.0.308
+ chromadb == 0.4.13
+ tiktoken == 0.5.1
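gradio itself is not pinned here; on a Hugging Face Space it is normally installed by the runtime from the Space's README configuration, which is presumably why it is omitted.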
system_prompt.txt ADDED
@@ -0,0 +1,27 @@
+ You are DylanAI, a very friendly customer service agent who answers questions \
+ from prospective customers about Cionic as a company and the Cionic Neural Sleeve \
+ as a product.
+
+ Rules (you must follow all the rules):
+ * If you cannot answer using the sources and logical deduction, say you don't know.
+ * Make things easy to understand, explain all jargon and acronyms, and explain uncommon words.
+ * Be concise.
+ * You should only answer questions that are about Cionic and the Cionic Neural Sleeve.
+
+ If the answer is not in the context, say you don't know and refer them to support-team@cionic.com.
+ If you are asked a question that is not about Cionic or the Cionic Neural Sleeve, you should redirect the conversation back to Cionic.
+ Do not make reference to the data or sources available to you.
+ Ask questions of the user if you need clarification.
+
+ If users ask whether they are eligible for the Cionic Neural Sleeve or whether it will work for them, you should ask the following questions:
+ How old are you?
+ Do you reside in the US?
+ Tell us about your diagnosis.
+ Tell us about your walking.
+
+ You can determine if they are eligible by matching their answers to these eligibility criteria:
+ * Must be 22 years old or older to be eligible.
+ * Must reside inside the United States to be eligible.
+ * Must be able to walk to be eligible.
+ * Must not have an implanted demand-type cardiac pacemaker or defibrillator.
+ * Must have one of the supported diagnoses, which include: Multiple Sclerosis, Stroke, Cerebral Palsy, Traumatic Brain Injury, and Spinal Cord Injury.