K00B404 committed on
Commit
5830aa1
·
verified ·
1 Parent(s): cd218c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +246 -20
app.py CHANGED
@@ -1,9 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from gradio_client import Client
3
  from langchain.llms import HuggingFaceTextGenInference
4
  from langchain.chains import LLMChain
5
  from langchain.prompts import PromptTemplate
6
-
7
  # Initialize the Qwen client
8
  qwen_client = Client("Qwen/Qwen2-0.5B")
9
 
@@ -43,27 +108,188 @@ from diffusers import (
43
  from diffusers.utils import export_to_gif, load_image
44
  import os
45
  os.system("pip install --upgrade pip")
46
- adapter = MotionAdapter.from_pretrained("openmmlab/PIA-condition-adapter")
47
- pipe = PIAPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter)
48
 
49
- # enable FreeInit
50
- # Refer to the enable_free_init documentation for a full list of configurable parameters
51
- pipe.enable_free_init(method="butterworth", use_fast_sampling=True)
52
 
53
- # Memory saving options
54
- pipe.enable_model_cpu_offload()
55
- pipe.enable_vae_slicing()
 
 
 
56
 
57
- pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
58
- image = load_image(
59
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true"
60
- )
61
- image = image.resize((512, 512))
62
- prompt = "cat in a field"
63
- negative_prompt = "wrong white balance, dark, sketches,worst quality,low quality"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
- generator = torch.Generator("cpu").manual_seed(0)
66
 
67
- output = pipe(image=image, prompt=prompt, generator=generator)
68
- frames = output.frames[0]
69
- export_to_gif(frames, "pia-freeinit-animation.gif")
 
1
+ import os
2
+ import re
3
+ import tempfile
4
+ from typing import List, Dict, Any, Optional,Mapping
5
+ from rich import print as rp
6
+ from langchain_core.runnables.base import Runnable
7
+ from langchain.agents import Tool, AgentExecutor
8
+ from langchain.memory import ConversationBufferMemory, CombinedMemory
9
+ from langchain.prompts import PromptTemplate
10
+ from hugging_chat_wrapper import HuggingChatWrapper
11
+ from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
12
+ from langchain_core.language_models.llms import LLM
13
+ from langchain_core.pydantic_v1 import Field
14
+
15
+ # Load environment variables
16
+
17
+
18
+ import os,sys,re
19
+ from rich import print as rp
20
+ from langchain import hub
21
+
22
+ import os
23
+ import re
24
+ import tempfile
25
+ import pandas as pd
26
+ from typing import List, Dict, Any
27
+ from langchain_core.runnables.base import Runnable
28
+ from langchain.agents.utils import validate_tools_single_input
29
+ from langchain_community.docstore import Wikipedia
30
+ from langchain_core.prompts import PromptTemplate
31
+ from PyQt6.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QTextEdit, QFileDialog, QLabel
32
+ from langchain_community.agent_toolkits import FileManagementToolkit
33
+ from langchain_core.prompts import ChatPromptTemplate
34
+ from langchain_community.vectorstores import Chroma
35
+ from langchain_community.embeddings import HuggingFaceEmbeddings
36
+ from langchain.retrievers import TimeWeightedVectorStoreRetriever
37
+ from langchain_core.documents import Document
38
+ from hugging_chat_wrapper import HuggingChatWrapper
39
+ from PyQt6.QtWidgets import ( QApplication, QMainWindow, QWidget, QVBoxLayout, QTabWidget, QTextEdit, QLineEdit, QPushButton)
40
+ #ChatMessagePromptTemplate
41
+ from langchain.agents import Tool, AgentExecutorIterator,AgentType, initialize_agent,AgentExecutor
42
+ from langchain_community.agent_toolkits.load_tools import load_tools
43
+ from langchain.agents import create_react_agent,create_self_ask_with_search_agent
44
+ from langchain_community.tools import DuckDuckGoSearchRun
45
+ from langchain_experimental.tools.python.tool import PythonAstREPLTool
46
+ from langchain_experimental.llm_bash.bash import BashProcess
47
+ from langchain_community.tools import ShellTool
48
+ from langchain.output_parsers.json import SimpleJsonOutputParser
49
+ from langchain_core.output_parsers.string import StrOutputParser
50
+ from langchain.memory import ConversationBufferMemory,ConversationSummaryBufferMemory,CombinedMemory
51
+ from langchain_community.tools.file_management import (
52
+ CopyFileTool,
53
+ DeleteFileTool,
54
+ FileSearchTool,
55
+ ListDirectoryTool,
56
+ MoveFileTool,
57
+ ReadFileTool,
58
+ WriteFileTool,
59
+ )
60
+ from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
61
+ from langchain_community.tools.wikidata.tool import WikidataAPIWrapper, WikidataQueryRun
62
+ from langchain_community.tools import DuckDuckGoSearchRun
63
+ from langchain_community.tools import WikipediaQueryRun
64
+ from langchain_community.utilities import WikipediaAPIWrapper
65
+
66
  import gradio as gr
67
  from gradio_client import Client
68
  from langchain.llms import HuggingFaceTextGenInference
69
  from langchain.chains import LLMChain
70
  from langchain.prompts import PromptTemplate
71
+ import langgraph
72
  # Initialize the Qwen client
73
  qwen_client = Client("Qwen/Qwen2-0.5B")
74
 
 
108
  from diffusers.utils import export_to_gif, load_image
109
  import os
110
  os.system("pip install --upgrade pip")
111
+ from typing import List
112
+ from typing_extensions import TypedDict
113
 
 
 
 
114
 
115
class ReWOO(TypedDict):
    """Shared state flowing through the ReWOO graph (planner -> tool executor -> solver)."""
    task: str          # the user question/task to solve
    plan_string: str   # raw plan text emitted by the planner LLM
    steps: List        # parsed (plan, step_name, tool, tool_input) tuples from the plan
    results: dict      # evidence collected so far, keyed by step name ("#E1", "#E2", ...)
    result: str        # final answer produced by the solver
121
 
122
+ #Planner
123
+
124
+ #The planner prompts an LLM to generate a plan in the form of a task list. The arguments to each task are strings that may contain special variables (#E{0-9}+) that are used for variable substitution from other task results.
125
+
126
+ #ReWOO workflow
127
+
128
+ #Our example agent will have two tools:
129
+
130
+ # Google - a search engine (in this case Tavily)
131
+ # LLM - an LLM call to reason about previous outputs.
132
+
133
+ #The LLM tool receives less of the prompt context and so can be more token-efficient than the ReACT paradigm.
134
+
135
from langchain_openai import ChatOpenAI

# Planner model. NOTE(review): runs at import time and presumably needs
# OPENAI_API_KEY in the environment — confirm before deploying.
model = ChatOpenAI(model="gpt-4o")

# Few-shot planning prompt. Plans reference evidence variables "#E<n>" that
# later steps substitute (see get_plan / tool_execution below).
prompt = """For the following task, make plans that can solve the problem step by step. For each plan, indicate \
which external tool together with tool input to retrieve evidence. You can store the evidence into a \
variable #E that can be called by later tools. (Plan, #E1, Plan, #E2, Plan, ...)

Tools can be one of the following:
(1) Google[input]: Worker that searches results from Google. Useful when you need to find short
and succinct answers about a specific topic. The input should be a search query.
(2) LLM[input]: A pretrained LLM like yourself. Useful when you need to act with general
world knowledge and common sense. Prioritize it when you are confident in solving the problem
yourself. Input can be any instruction.

For example,
Task: Thomas, Toby, and Rebecca worked a total of 157 hours in one week. Thomas worked x
hours. Toby worked 10 hours less than twice what Thomas worked, and Rebecca worked 8 hours
less than Toby. How many hours did Rebecca work?
Plan: Given Thomas worked x hours, translate the problem into algebraic expressions and solve
with Wolfram Alpha. #E1 = WolframAlpha[Solve x + (2x − 10) + ((2x − 10) − 8) = 157]
Plan: Find out the number of hours Thomas worked. #E2 = LLM[What is x, given #E1]
Plan: Calculate the number of hours Rebecca worked. #E3 = Calculator[(2 ∗ #E2 − 10) − 8]

Begin!
Describe your plans with rich details. Each Plan should be followed by only one #E.

Task: {task}"""

# Demo task, also reused by the app.stream(...) loop at the bottom of the file.
task = "what is the exact hometown of the 2024 mens australian open winner"

# One-off smoke test of the planner prompt — this issues an LLM call at import time.
result = model.invoke(prompt.format(task=task))

print(result.content)
169
+
170
+ #Plan: Use Google to search for the 2024 Australian Open winner.
171
+ #E1 = Google[2024 Australian Open winner]
172
+
173
+ #Plan: Retrieve the name of the 2024 Australian Open winner from the search results.
174
+ #E2 = LLM[What is the name of the 2024 Australian Open winner, given #E1]
175
+
176
+ #Plan: Use Google to search for the hometown of the 2024 Australian Open winner.
177
+ #E3 = Google[hometown of 2024 Australian Open winner, given #E2]
178
+
179
+ #Plan: Retrieve the hometown of the 2024 Australian Open winner from the search results.
180
+ #E4 = LLM[What is the hometown of the 2024 Australian Open winner, given #E3]
181
+
182
+ #Planner Node
183
+
184
+ #To connect the planner to our graph, we will create a get_plan node that accepts the ReWOO state and returns with a state update for the steps and plan_string fields.
185
+
186
+ import re
187
+
188
+ from langchain_core.prompts import ChatPromptTemplate
189
+
190
# Regex to match expressions of the form E#... = ...[...]
# Capture groups: (plan description, step name "#E<n>", tool name, tool input).
regex_pattern = r"Plan:\s*(.+)\s*(#E\d+)\s*=\s*(\w+)\s*\[([^\]]+)\]"
prompt_template = ChatPromptTemplate.from_messages([("user", prompt)])
planner = prompt_template | model  # planning chain: user prompt -> gpt-4o
194
+
195
+
196
+ def get_plan(state: ReWOO):
197
+ task = state["task"]
198
+ result = planner.invoke({"task": task})
199
+ # Find all matches in the sample text
200
+ matches = re.findall(regex_pattern, result.content)
201
+ return {"steps": matches, "plan_string": result.content}
202
+
203
+ #Executor
204
+
205
+ #The executor receives the plan and executes the tools in sequence.
206
+
207
+ #Below, instantiate the search engine and define the tool execution node.
208
+
209
from langchain_community.tools.tavily_search import TavilySearchResults

# Search tool backing the plan's "Google" steps (Tavily under the hood).
# NOTE(review): presumably requires TAVILY_API_KEY in the environment — confirm.
search = TavilySearchResults()
212
+
213
+ def _get_current_task(state: ReWOO):
214
+ if "results" not in state or state["results"] is None:
215
+ return 1
216
+ if len(state["results"]) == len(state["steps"]):
217
+ return None
218
+ else:
219
+ return len(state["results"]) + 1
220
+
221
+
222
def tool_execution(state: ReWOO):
    """Worker node that executes the tools of a given plan.

    Runs the next pending step (1-based index from ``_get_current_task``),
    substitutes previously gathered evidence ("#E1", "#E2", ...) into the
    tool input, invokes the matching tool, and returns the updated results.

    Raises:
        ValueError: if the plan names a tool other than "Google" or "LLM".
    """
    _step = _get_current_task(state)
    # Each step is (plan description, step name, tool, tool input).
    _, step_name, tool, tool_input = state["steps"][_step - 1]
    _results = (state["results"] or {}) if "results" in state else {}
    # Inject earlier evidence values into this step's input string.
    for k, v in _results.items():
        tool_input = tool_input.replace(k, v)
    if tool == "Google":
        result = search.invoke(tool_input)
    elif tool == "LLM":
        result = model.invoke(tool_input)
    else:
        # Was a bare ``raise ValueError`` — include the offending tool name
        # so malformed plans are diagnosable from the traceback.
        raise ValueError(f"Unknown tool requested by plan: {tool!r}")
    _results[step_name] = str(result)
    return {"results": _results}
237
+
238
+ #Solver
239
+
240
+ #The solver receives the full plan and generates the final response based on the responses of the tool calls from the worker.
241
+
242
# Solver prompt: receives the rendered plan + evidence ({plan}) and the
# original question ({task}); the model must answer with no extra words.
solve_prompt = """Solve the following task or problem. To solve the problem, we have made step-by-step Plan and \
retrieved corresponding Evidence to each Plan. Use them with caution since long evidence might \
contain irrelevant information.

{plan}

Now solve the question or task according to provided Evidence above. Respond with the answer
directly with no extra words.

Task: {task}
Response:"""
253
+
254
+
255
def solve(state: ReWOO):
    """Solver node: render the executed plan as text and ask the model for the final answer.

    Returns a state update with the model's answer under ``result``.
    """
    # Evidence gathered so far. Hoisted out of the loop — it was recomputed on
    # every iteration although it never changes within this call.
    _results = (state["results"] or {}) if "results" in state else {}
    plan = ""
    for _plan, step_name, tool, tool_input in state["steps"]:
        # Substitute earlier evidence ("#E1", ...) into this step's text.
        for k, v in _results.items():
            tool_input = tool_input.replace(k, v)
            step_name = step_name.replace(k, v)
        # NOTE(review): entries are concatenated without a trailing newline,
        # matching the original behavior — consider appending "\n" per entry.
        plan += f"Plan: {_plan}\n{step_name} = {tool}[{tool_input}]"
    prompt = solve_prompt.format(plan=plan, task=state["task"])
    result = model.invoke(prompt)
    return {"result": result.content}
266
+
267
+ #Define Graph
268
+
269
+ #Our graph defines the workflow. Each of the planner, tool executor, and solver modules are added as nodes.
270
+
271
def _route(state):
    """Conditional edge: loop back to "tool" until every step has run, then go to "solve"."""
    pending = _get_current_task(state)
    # _get_current_task returns None once all planned steps have results.
    return "solve" if pending is None else "tool"
279
+
280
from langgraph.graph import END, StateGraph, START

# Wire up the ReWOO workflow: plan once, loop over tool execution, then solve.
graph = StateGraph(ReWOO)
graph.add_node("plan", get_plan)
graph.add_node("tool", tool_execution)
graph.add_node("solve", solve)
graph.add_edge("plan", "tool")
graph.add_edge("solve", END)
# After each tool call, _route decides: more steps -> "tool", done -> "solve".
graph.add_conditional_edges("tool", _route)
graph.add_edge(START, "plan")

app = graph.compile()

# Runs at import time: streams intermediate state updates for the demo task
# defined above, one node result per iteration.
for s in app.stream({"task": task}):
    print(s)
    print("---")