jozzy committed on
Commit
c9349a0
·
1 Parent(s): 94d73a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -10
app.py CHANGED
@@ -20,6 +20,7 @@ from langchain.document_loaders import DirectoryLoader
20
  from langchain.indexes import VectorstoreIndexCreator
21
  from langchain.embeddings.openai import OpenAIEmbeddings
22
  from langchain.vectorstores import Pinecone
 
23
 
24
 
25
  openai.api_key = os.environ['OPENAI_API_KEY']
@@ -104,8 +105,10 @@ def roleChoice(role):
104
 
105
 
106
 
107
- def talk2file(index_name, text):
108
- global messages
 
 
109
 
110
  #same as filesearch
111
  init_pinecone()
@@ -114,19 +117,29 @@ def talk2file(index_name, text):
114
  docs = docsearch.similarity_search(text)
115
 
116
 
117
- prompt = text + ", based on the following context: \n\n" + docs[0].page_content
118
- messages.append({"role": "user", "content": prompt})
 
119
 
120
  response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
121
 
122
  system_message = response["choices"][0]["message"]
123
- messages.append(system_message)
124
 
125
- chats = ""
126
- for msg in messages:
127
- if msg['role'] != 'system':
128
- chats += msg['role'] + ": " + msg['content'] + "\n\n"
 
 
 
 
 
 
 
129
 
 
 
130
  return chats
131
 
132
 
@@ -193,7 +206,7 @@ with gr.Blocks() as pinecone_tools:
193
 
194
 
195
  role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description = "Choose your GPT roles, e.g. You are a helpful technology assistant.")
196
- text = gr.Interface(fn=talk2file, inputs=["text", "text"], outputs="text")
197
 
198
  vector_server = gr.Interface(fn=process_file, inputs=["text", gr.inputs.File(file_count="directory")], outputs="text")
199
 
 
20
  from langchain.indexes import VectorstoreIndexCreator
21
  from langchain.embeddings.openai import OpenAIEmbeddings
22
  from langchain.vectorstores import Pinecone
23
+ import markdown2
24
 
25
 
26
  openai.api_key = os.environ['OPENAI_API_KEY']
 
105
 
106
 
107
 
108
def talk2file(index_name, text):
    """Answer a question using context retrieved from a Pinecone index.

    Searches the Pinecone vector index *index_name* for documents similar
    to *text*, builds a single-turn chat prompt from the best match, asks
    gpt-3.5-turbo, and returns an HTML fragment showing the user question,
    the retrieved context, and the model's answer (for a Gradio "html"
    output component).

    Parameters
    ----------
    index_name : str
        Name of the Pinecone index to search.
    text : str
        The user's question.

    Returns
    -------
    str
        HTML fragment with three highlighted sections:
        question / context / answer.
    """
    # Fresh message list per call — the shared global history is
    # deliberately disabled so each question is answered independently.
    messages = []

    # Same retrieval flow as filesearch.
    init_pinecone()
    # NOTE(review): the diff omits the lines that build `docsearch`
    # (original lines 115-116); the standard pattern for this file is
    # assumed here — confirm against the full source.
    docsearch = Pinecone.from_existing_index(index_name, OpenAIEmbeddings())
    docs = docsearch.similarity_search(text)

    prompt = text + ", based on the following context: \n\n"
    qwcontext = prompt + docs[0].page_content
    messages.append({"role": "user", "content": qwcontext})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]

    # Compose the HTML result. Each section gets its own variable so the
    # question is not overwritten by the answer (bug in the previous
    # revision: `User_Question` was assigned twice).
    title_question = '<h2 style="background-color: yellow;"><b>User Question: </b></h2>'
    user_question = f'<span style="background-color: yellow;">{prompt}</span>'
    title_context = '<h2 style="background-color: blue;"><b>Context Found: </b></h2>'
    context = f'<span style="background-color: blue;">{docs[0].page_content}</span>'
    title_answer = '<h2 style="background-color: green;"><b>Answer: </b></h2>'
    # Use the message's text content, not the raw message dict.
    answer = f'<span style="background-color: green;">{system_message["content"]}</span>'

    return (
        title_question + user_question
        + title_context + context
        + title_answer + answer
    )
144
 
145
 
 
206
 
207
 
208
  role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description = "Choose your GPT roles, e.g. You are a helpful technology assistant.")
209
+ text = gr.Interface(fn=talk2file, inputs=["text", "text"], outputs="html")
210
 
211
  vector_server = gr.Interface(fn=process_file, inputs=["text", gr.inputs.File(file_count="directory")], outputs="text")
212