Update app.py
Browse files
app.py
CHANGED
|
@@ -20,7 +20,7 @@ from langchain.document_loaders import DirectoryLoader
|
|
| 20 |
from langchain.indexes import VectorstoreIndexCreator
|
| 21 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 22 |
from langchain.vectorstores import Pinecone
|
| 23 |
-
import
|
| 24 |
|
| 25 |
|
| 26 |
openai.api_key = os.environ['OPENAI_API_KEY']
|
|
@@ -133,8 +133,8 @@ def talk2file(index_name, text):
|
|
| 133 |
|
| 134 |
#Title1 = '<h2 style="background-color: yellow;"><b>User Question: </b></h2>'
|
| 135 |
#User_Question = f'<div style="background-color: yellow; display: inline-block; word-wrap: break-word;">{prompt}</div>'
|
| 136 |
-
Title2 = '<h2
|
| 137 |
-
context =
|
| 138 |
#context = f'<span style="background-color: #ADD8E6; display: inline-block; word-wrap: break-word;">{context2html}</span>'
|
| 139 |
#Title3 = '<h2 style="background-color: #90EE90;"><b>Answer: </b></h2>'
|
| 140 |
answer = system_message["content"]
|
|
@@ -206,8 +206,9 @@ with gr.Blocks() as pinecone_tools:
|
|
| 206 |
|
| 207 |
|
| 208 |
textbox = gr.inputs.Textbox(label="Vector Server Index Name: ", default="amd")
|
|
|
|
| 209 |
role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description = "Choose your GPT roles, e.g. You are a helpful technology assistant.")
|
| 210 |
-
text = gr.Interface(fn=talk2file, inputs=[textbox, "text"], outputs=["html",
|
| 211 |
|
| 212 |
vector_server = gr.Interface(fn=process_file, inputs=["text", gr.inputs.File(file_count="directory")], outputs="text")
|
| 213 |
|
|
|
|
| 20 |
from langchain.indexes import VectorstoreIndexCreator
|
| 21 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 22 |
from langchain.vectorstores import Pinecone
|
| 23 |
+
import markdown
|
| 24 |
|
| 25 |
|
| 26 |
openai.api_key = os.environ['OPENAI_API_KEY']
|
|
|
|
| 133 |
|
| 134 |
#Title1 = '<h2 style="background-color: yellow;"><b>User Question: </b></h2>'
|
| 135 |
#User_Question = f'<div style="background-color: yellow; display: inline-block; word-wrap: break-word;">{prompt}</div>'
|
| 136 |
+
Title2 = '<h2><b>Context Found: </b></h2>'
|
| 137 |
+
context = markdown.markdown(docs[0].page_content)
|
| 138 |
#context = f'<span style="background-color: #ADD8E6; display: inline-block; word-wrap: break-word;">{context2html}</span>'
|
| 139 |
#Title3 = '<h2 style="background-color: #90EE90;"><b>Answer: </b></h2>'
|
| 140 |
answer = system_message["content"]
|
|
|
|
| 206 |
|
| 207 |
|
| 208 |
textbox = gr.inputs.Textbox(label="Vector Server Index Name: ", default="amd")
|
| 209 |
+
answerbox = gr.inputs.Textbox(label="Assistant answer")
|
| 210 |
role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description = "Choose your GPT roles, e.g. You are a helpful technology assistant.")
|
| 211 |
+
text = gr.Interface(fn=talk2file, inputs=[textbox, "text"], outputs=["html", answerbox])
|
| 212 |
|
| 213 |
vector_server = gr.Interface(fn=process_file, inputs=["text", gr.inputs.File(file_count="directory")], outputs="text")
|
| 214 |
|