Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
import inspect
|
| 2 |
-from langchain_huggingface import HuggingFacePipeline
|
| 3 |
path_hf=inspect.getfile(HuggingFacePipeline)
|
| 4 |
from subprocess import Popen, PIPE as P
|
| 5 |
from langchain_experimental.tools.python.tool import PythonREPLTool as PYT
|
|
@@ -88,6 +88,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
| 88 |
#t=T.from_pretrained("peterpeter8585/syai4.3")
|
| 89 |
#pipe=pipeline(model=m,tokenizer=t,task="text-generation")
|
| 90 |
llm=HuggingFacePipeline.from_model_id(model_id="peterpeter8585/sungyoonaimodel2",task="text-generation")
|
|
|
|
| 91 |
from langchain.retrievers import WikipediaRetriever as Wiki
|
| 92 |
import gradio as gr
|
| 93 |
chatbot = gr.Chatbot(
|
|
@@ -111,14 +112,14 @@ def chat(message,
|
|
| 111 |
max_tokens,
|
| 112 |
temperature,
|
| 113 |
top_p, chat_session):
|
| 114 |
-messages=[
|
| 115 |
for val in history:
|
| 116 |
if val[0]:
|
| 117 |
-messages.append(
|
| 118 |
if val[1]:
|
| 119 |
-messages.append(
|
| 120 |
|
| 121 |
-messages.append(
|
| 122 |
memory=MEM(memory_key="history")
|
| 123 |
agent=Ex(agent=Agent(llm,tools,prompt),tools=tools,verbose=True,handle_parsing_errors=True,memory=memory)
|
| 124 |
return agent.invoke({"input":messages,"chat_history":memory.buffer_as_messages})
|
|
|
|
| 1 |
import inspect
|
| 2 |
+from langchain_huggingface import HuggingFacePipeline,ChatHuggingFace
|
| 3 |
path_hf=inspect.getfile(HuggingFacePipeline)
|
| 4 |
from subprocess import Popen, PIPE as P
|
| 5 |
from langchain_experimental.tools.python.tool import PythonREPLTool as PYT
|
|
|
|
| 88 |
#t=T.from_pretrained("peterpeter8585/syai4.3")
|
| 89 |
#pipe=pipeline(model=m,tokenizer=t,task="text-generation")
|
| 90 |
llm=HuggingFacePipeline.from_model_id(model_id="peterpeter8585/sungyoonaimodel2",task="text-generation")
|
| 91 |
+llm=ChatHuggingFace(llm=llm)
|
| 92 |
from langchain.retrievers import WikipediaRetriever as Wiki
|
| 93 |
import gradio as gr
|
| 94 |
chatbot = gr.Chatbot(
|
|
|
|
| 112 |
max_tokens,
|
| 113 |
temperature,
|
| 114 |
top_p, chat_session):
|
| 115 |
+messages=[]
|
| 116 |
for val in history:
|
| 117 |
if val[0]:
|
| 118 |
+messages.append(val[0])
|
| 119 |
if val[1]:
|
| 120 |
+messages.append(val[1])
|
| 121 |
|
| 122 |
+messages.append(message)
|
| 123 |
memory=MEM(memory_key="history")
|
| 124 |
agent=Ex(agent=Agent(llm,tools,prompt),tools=tools,verbose=True,handle_parsing_errors=True,memory=memory)
|
| 125 |
return agent.invoke({"input":messages,"chat_history":memory.buffer_as_messages})
|