gerglitzen committed
Commit 20fe8c3 · 1 Parent(s): 6287af1
Files changed (1)
  1. app.py +2 -36
app.py CHANGED
@@ -1,6 +1,6 @@
  import gradio as gr
  import os
- from langchain.callbacks.base import BaseCallbackHandler
+ from callback_handler import QueueCallback
  from collections.abc import Generator
  from queue import Queue, Empty
  from threading import Thread
@@ -12,9 +12,6 @@ load_dotenv()

  from call_openai import call_openai

- from langchain import PromptTemplate
- from langchain.chains import LLMChain
- from langchain.chat_models import ChatOpenAI
  import pinecone
  from langchain.embeddings import OpenAIEmbeddings
  from langchain.vectorstores import Pinecone
@@ -25,37 +22,8 @@ PINECONE_API_KEY=os.environ["PINECONE_API_KEY"]
  PINECONE_ENV=os.environ["PINECONE_ENV"]
  PINECONE_INDEX=os.environ["PINECONE_INDEX"]

- class QueueCallback(BaseCallbackHandler):
-     """Callback handler for streaming LLM responses to a queue."""
-
-     def __init__(self, q):
-         self.q = q
-
-     def on_llm_new_token(self, token: str, **kwargs: any) -> None:
-         self.q.put(token)
-
-     def on_llm_end(self, *args, **kwargs: any) -> None:
-         return self.q.empty()
-
  # TOOL
  #####################################################################
- llm = ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0)
-
- template = """
- You are an expert research assistant. You can access information about articles via your tool.
- Use information ONLY from this tool. Do not invent or add any more knowladge, be strict for the articles.
- {instuction}
- User: {user}
- --------
- {content}
- """
-
- prompt = PromptTemplate(
-     input_variables=["instuction", "user", "content"],
-     template=template,
- )
-
-
  pinecone.init(
      api_key=PINECONE_API_KEY,
      environment=PINECONE_ENV
@@ -124,9 +92,7 @@ def stream(input_text, history, user_prompt, topic, topk) -> Generator:
      job_done = object()

      # Create a funciton to call - this will run in a thread
-     def task():
-         #chain = LLMChain(llm=llm, prompt=prompt, callbacks=[QueueCallback(q)])
-
+     def task():
          tool_resp = query_tool(topic, topk, input_text)

          response = call_openai(
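The new import in the first hunk suggests the removed class was moved into its own module rather than deleted. A minimal sketch of what callback_handler.py presumably contains, assuming the class was carried over as-is; the file itself is not part of this diff, and the lowercase "any" annotation from the removed lines is corrected to typing.Any here:

# callback_handler.py -- hypothetical reconstruction from the removed lines;
# this file is not shown in the commit, so treat its contents as an assumption.
from typing import Any

from langchain.callbacks.base import BaseCallbackHandler


class QueueCallback(BaseCallbackHandler):
    """Callback handler for streaming LLM responses to a queue."""

    def __init__(self, q):
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Push each newly generated token onto the shared queue.
        self.q.put(token)

    def on_llm_end(self, *args, **kwargs: Any) -> None:
        # Called when generation finishes; the consumer detects completion
        # via its own sentinel, so this only reports the queue state.
        return self.q.empty()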
 
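The last hunk keeps the thread-plus-queue streaming machinery that QueueCallback feeds: a worker thread runs task() while the stream() generator drains tokens from a queue. A minimal self-contained sketch of that pattern under stated assumptions; stream_tokens and produce are illustrative names, while Queue, Empty, Thread, task() and the job_done sentinel come from the diff:

# Producer/consumer token streaming as in app.py's stream(): a worker thread
# generates tokens into a queue and the generator drains it incrementally.
from collections.abc import Generator
from queue import Queue, Empty
from threading import Thread


def stream_tokens(produce) -> Generator[str, None, None]:
    q: Queue = Queue()
    job_done = object()  # sentinel marking the end of generation

    def task():
        produce(q)       # e.g. an LLM call wired to QueueCallback(q)
        q.put(job_done)  # tell the consumer we are done

    Thread(target=task).start()

    content = ""
    while True:
        try:
            token = q.get(block=True, timeout=1)
            if token is job_done:
                return
            content += token
            yield content  # yield the accumulated text after every token
        except Empty:
            continue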
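Inside task(), query_tool(topic, topk, input_text) supplies the article context; its definition sits outside the changed hunks. Given the surviving pinecone.init() call and the OpenAIEmbeddings/Pinecone imports, a plausible shape is sketched below; the function body, the topic metadata filter, and the join format are assumptions, not code from this commit:

# Hypothetical sketch of query_tool(): embed the query, pull the top-k
# matching article chunks from the Pinecone index, filtered by topic.
import os

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

PINECONE_INDEX = os.environ["PINECONE_INDEX"]  # as in app.py


def query_tool(topic: str, topk: int, query: str) -> str:
    store = Pinecone.from_existing_index(PINECONE_INDEX, OpenAIEmbeddings())
    docs = store.similarity_search(query, k=topk, filter={"topic": topic})
    return "\n\n".join(doc.page_content for doc in docs)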