Commit 47c7f9e · stable
Parent(s): 3a05d47
app.py CHANGED
@@ -10,11 +10,14 @@ from dotenv import load_dotenv
 
 load_dotenv()
 
+from call_openai import call_openai
+
 from langchain import PromptTemplate
 from langchain.chains import LLMChain
 from langchain.chat_models import ChatOpenAI
 import pinecone
 from langchain.embeddings import OpenAIEmbeddings
+from langchain.vectorstores import Pinecone
 
 
 OPENAI_API_KEY=os.environ["OPENAI_API_KEY"]
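The newly imported `Pinecone` vectorstore class backs the `Pinecone.from_existing_index` call further down, but the underlying `pinecone` client still needs a one-time `init` before any index can be opened. A minimal sketch of that setup, which is not shown in this diff (the env-var names are assumptions):

    import os
    import pinecone

    # One-time client init for the legacy (v2) pinecone SDK; required before
    # Pinecone.from_existing_index() can connect to PINECONE_INDEX.
    pinecone.init(
        api_key=os.environ["PINECONE_API_KEY"],      # assumed variable name
        environment=os.environ["PINECONE_ENV"],      # assumed variable name
    )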
@@ -42,7 +45,6 @@ template = """
 You are an expert research assistant. You can access information about articles via your tool.
 Use information ONLY from this tool. Do not invent or add any more knowladge, be strict for the articles.
 {instuction}
-
 User: {user}
 --------
 {content}
@@ -64,23 +66,24 @@ embedder = OpenAIEmbeddings()
 
 
 class PineconeSearch:
-    docsearch
-    topk
+    docsearch = None
+    topk = 2
 
     def __init__(
+        self,
         namespace,
         topk
     ):
         self.docsearch = Pinecone.from_existing_index(PINECONE_INDEX, embedder, namespace=namespace)
         self.topk=topk
 
-    def __call__(query):
-        …
+    def __call__(self,query):
+        docs = self.docsearch.similarity_search(query=query, k=self.topk)
         context = ""
         for doc in docs:
-            context += f"…
-            context += f"Source: {doc.…
-            …
+            context += f"Content:\n{doc.page_content}\n"
+            context += f"Source: {doc.metadata['url']}\n"
+            context += "----"
         return context
 
 
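With `self` threaded through `__init__` and `__call__` and the `similarity_search` body filled in, `PineconeSearch` is now a callable retriever. A usage sketch (the namespace string and query are illustrative, not from the repo):

    # Illustrative only -- namespace and query are made up.
    search = PineconeSearch(namespace="articles", topk=5)
    context = search("What methods does the article compare?")
    # context holds "Content:\n...\nSource: <url>\n----" per retrieved chunk.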
@@ -111,6 +114,8 @@ def print_token_and_price(response):
     print(f"Total price: {price*370:.2f} Ft")
     print("===================================")
 
+agent_prompt = """You are an expert research assistant. You can access information about articles via your tool.
+Use information ONLY from this tool. Do not invent or add any more knowladge, be strict for the articles."""
 
 
 def stream(input_text, history, user_prompt, topic, topk) -> Generator:
@@ -120,11 +125,22 @@ def stream(input_text, history, user_prompt, topic, topk) -> Generator:
 
     # Create a funciton to call - this will run in a thread
     def task():
-        chain = LLMChain(llm=llm, prompt=prompt, callbacks=[QueueCallback(q)])
+        #chain = LLMChain(llm=llm, prompt=prompt, callbacks=[QueueCallback(q)])
 
         tool_resp = query_tool(topic, topk, input_text)
 
-        response = …
+        response = call_openai(
+            messages=[{"role": "system", "content": agent_prompt},
+                      {"role": "system", "content": user_prompt},
+                      {"role": "user", "content": input_text},
+                      {"role": "system", "content": tool_resp}
+                      ],
+            stream="token",
+            model="gpt-4-1106-preview",
+            callback=QueueCallback(q)
+        )
+
+        print(response)
 
         #print_token_and_price(response=response)
         q.put(job_done)
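`call_openai` comes from the local `call_openai` module added to the imports above; the module itself is not in this commit, so its behaviour can only be inferred from this call site. A hypothetical sketch, assuming the pre-1.0 `openai` SDK and a `QueueCallback` exposing LangChain's `on_llm_new_token`:

    # Hypothetical call_openai.py -- inferred from the call site, not this commit.
    import openai

    def call_openai(messages, model, stream="token", callback=None):
        text = ""
        for chunk in openai.ChatCompletion.create(
            model=model, messages=messages, stream=True
        ):
            delta = chunk["choices"][0]["delta"].get("content", "")
            if delta and callback is not None:
                callback.on_llm_new_token(delta)  # push each token onto the queue
            text += delta
        return text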
@@ -170,7 +186,7 @@ namespace_drobdown = gr.Dropdown(
 )
 topk_slider = gr.Slider(
     minimum=10,
-    maximum=…
+    maximum=100,
     value=70,
     step=10
 )
@@ -182,4 +198,4 @@ chatInterface = gr.ChatInterface(
     fn=ask_llm,
     additional_inputs=additional_inputs,
     additional_inputs_accordion_name="Agent parameters"
-).queue().launch()
+).queue().launch()
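The widgets above reach `ask_llm` through `additional_inputs`, in declaration order, after the standard message/history pair. A sketch of the wiring this implies (the input list and the `ask_llm` body are assumptions; neither appears in this diff):

    # Assumed wiring -- not shown in this diff.
    additional_inputs = [user_prompt_box, namespace_drobdown, topk_slider]

    def ask_llm(message, history, user_prompt, topic, topk):
        # message/history come from the chat box; the remaining three
        # arguments arrive in the order of additional_inputs above.
        yield from stream(message, history, user_prompt, topic, topk)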
|