# Scraped from a Hugging Face Spaces page; the Space's reported status was "Runtime error".
| import os | |
| import re | |
| import tempfile | |
| from typing import List, Dict, Any, Optional,Mapping | |
| from rich import print as rp | |
| from langchain_core.runnables.base import Runnable | |
| from langchain.agents import Tool, AgentExecutor | |
| from langchain.memory import ConversationBufferMemory, CombinedMemory | |
| from langchain.prompts import PromptTemplate | |
| from hugging_chat_wrapper import HuggingChatWrapper | |
| from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun | |
| from langchain_core.language_models.llms import LLM | |
| from langchain_core.pydantic_v1 import Field | |
| # Load environment variables | |
| import os,sys,re | |
| from rich import print as rp | |
| from langchain import hub | |
| import os | |
| import re | |
| import tempfile | |
| import pandas as pd | |
| from typing import List, Dict, Any | |
| from langchain_core.runnables.base import Runnable | |
| from langchain.agents.utils import validate_tools_single_input | |
| from langchain_community.docstore import Wikipedia | |
| from langchain_core.prompts import PromptTemplate | |
| from PyQt6.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QTextEdit, QFileDialog, QLabel | |
| from langchain_community.agent_toolkits import FileManagementToolkit | |
| from langchain_core.prompts import ChatPromptTemplate | |
| from langchain_community.vectorstores import Chroma | |
| from langchain_community.embeddings import HuggingFaceEmbeddings | |
| from langchain.retrievers import TimeWeightedVectorStoreRetriever | |
| from langchain_core.documents import Document | |
| from hugging_chat_wrapper import HuggingChatWrapper | |
| from PyQt6.QtWidgets import ( QApplication, QMainWindow, QWidget, QVBoxLayout, QTabWidget, QTextEdit, QLineEdit, QPushButton) | |
| #ChatMessagePromptTemplate | |
| from langchain.agents import Tool, AgentExecutorIterator,AgentType, initialize_agent,AgentExecutor | |
| from langchain_community.agent_toolkits.load_tools import load_tools | |
| from langchain.agents import create_react_agent,create_self_ask_with_search_agent | |
| from langchain_community.tools import DuckDuckGoSearchRun | |
| from langchain_experimental.tools.python.tool import PythonAstREPLTool | |
| from langchain_experimental.llm_bash.bash import BashProcess | |
| from langchain_community.tools import ShellTool | |
| from langchain.output_parsers.json import SimpleJsonOutputParser | |
| from langchain_core.output_parsers.string import StrOutputParser | |
| from langchain.memory import ConversationBufferMemory,ConversationSummaryBufferMemory,CombinedMemory | |
| from langchain_community.tools.file_management import ( | |
| CopyFileTool, | |
| DeleteFileTool, | |
| FileSearchTool, | |
| ListDirectoryTool, | |
| MoveFileTool, | |
| ReadFileTool, | |
| WriteFileTool, | |
| ) | |
| from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper | |
| from langchain_community.tools.wikidata.tool import WikidataAPIWrapper, WikidataQueryRun | |
| from langchain_community.tools import DuckDuckGoSearchRun | |
| from langchain_community.tools import WikipediaQueryRun | |
| from langchain_community.utilities import WikipediaAPIWrapper | |
| import gradio as gr | |
| from gradio_client import Client | |
| from langchain.llms import HuggingFaceTextGenInference | |
| from langchain.chains import LLMChain | |
| from langchain.prompts import PromptTemplate | |
| import langgraph | |
# Initialize the Qwen client
# Connects to the public "Qwen/Qwen2-0.5B" gradio Space at import time; this
# needs network access and can raise if the Space is unreachable.
qwen_client = Client("Qwen/Qwen2-0.5B")
# Create a custom LLM class to use with LangChain
class QwenLLM(HuggingFaceTextGenInference):
    """LLM shim that ignores the configured inference server and instead routes
    every generation call through the module-level ``qwen_client`` gradio Space.

    NOTE(review): ``stop`` sequences are accepted for interface compatibility
    but are not forwarded to the Space — confirm that is acceptable.
    """

    def _call(self, prompt, stop=None, run_manager=None, **kwargs):
        # Accept run_manager/**kwargs: LangChain's LLM machinery passes them to
        # _call, so the previous (self, prompt, stop=None) override raised
        # TypeError when invoked through a chain.
        result = qwen_client.predict(
            query=prompt,
            history=[],
            system="You are a helpful assistant.",
            api_name="/model_chat"
        )
        return result[0][1]  # Extract the assistant's response
# Initialize the LLM
# NOTE(review): these sampling parameters belong to HuggingFaceTextGenInference;
# QwenLLM._call bypasses the inference server entirely and always calls the
# gradio Space, so none of them take effect — confirm whether they can go.
llm = QwenLLM(
    inference_server_url="https://your-inference-server-url",  # This is a placeholder
    max_new_tokens=512,
    top_k=10,
    top_p=0.95,
    typical_p=0.95,
    temperature=0.1,
    repetition_penalty=1.03
)
# Create the LCel chain
# NOTE(review): `template` is never assembled into a chain in the code visible
# here — verify it is used elsewhere or remove it.
template = """You are a helpful AI assistant. Please respond to the following user input:
User: {user_input}"""
| import torch | |
| from diffusers import ( | |
| DDIMScheduler, | |
| MotionAdapter, | |
| PIAPipeline, | |
| ) | |
| from diffusers.utils import export_to_gif, load_image | |
import os
import subprocess
import sys

# Upgrade pip for the *current* interpreter. `python -m pip` is the documented
# reliable way to invoke pip, and passing an argument list (no shell string)
# avoids the pitfalls of the previous os.system("pip install --upgrade pip"),
# which ran whichever `pip` happened to be first on PATH.
# NOTE(review): installing packages at import time is a heavy side effect;
# consider declaring this in the Space's requirements instead.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--upgrade", "pip"],
    check=False,  # mirror os.system(), whose exit status was ignored
)
from typing import List
from typing_extensions import TypedDict


class ReWOO(TypedDict):
    """Shared state threaded through the ReWOO planner/executor/solver graph."""

    task: str  # the user's original question
    plan_string: str  # raw planner output containing the full plan text
    steps: List  # (plan, step_name, tool, tool_input) tuples parsed by regex
    results: dict  # step name ("#E1", "#E2", ...) -> stringified tool output
    result: str  # final answer produced by the solver node
| #Planner | |
| #The planner prompts an LLM to generate a plan in the form of a task list. The arguments to each task are strings that may contain special variables (#E{0-9}+) that are used for variable substitution from other task results. | |
| #ReWOO workflow | |
| #Our example agent will have two tools: | |
| # Google - a search engine (in this case Tavily) | |
| # LLM - an LLM call to reason about previous outputs. | |
| #The LLM tool receives less of the prompt context and so can be more token-efficient than the ReACT paradigm. | |
| from langchain_openai import ChatOpenAI | |
| model = ChatOpenAI(model="gpt-4o") | |
| prompt = """For the following task, make plans that can solve the problem step by step. For each plan, indicate \ | |
| which external tool together with tool input to retrieve evidence. You can store the evidence into a \ | |
| variable #E that can be called by later tools. (Plan, #E1, Plan, #E2, Plan, ...) | |
| Tools can be one of the following: | |
| (1) Google[input]: Worker that searches results from Google. Useful when you need to find short | |
| and succinct answers about a specific topic. The input should be a search query. | |
| (2) LLM[input]: A pretrained LLM like yourself. Useful when you need to act with general | |
| world knowledge and common sense. Prioritize it when you are confident in solving the problem | |
| yourself. Input can be any instruction. | |
| For example, | |
| Task: Thomas, Toby, and Rebecca worked a total of 157 hours in one week. Thomas worked x | |
| hours. Toby worked 10 hours less than twice what Thomas worked, and Rebecca worked 8 hours | |
| less than Toby. How many hours did Rebecca work? | |
| Plan: Given Thomas worked x hours, translate the problem into algebraic expressions and solve | |
| with Wolfram Alpha. #E1 = WolframAlpha[Solve x + (2x − 10) + ((2x − 10) − 8) = 157] | |
| Plan: Find out the number of hours Thomas worked. #E2 = LLM[What is x, given #E1] | |
| Plan: Calculate the number of hours Rebecca worked. #E3 = Calculator[(2 ∗ #E2 − 10) − 8] | |
| Begin! | |
| Describe your plans with rich details. Each Plan should be followed by only one #E. | |
| Task: {task}""" | |
| task = "what is the exact hometown of the 2024 mens australian open winner" | |
| result = model.invoke(prompt.format(task=task)) | |
| print(result.content) | |
| #Plan: Use Google to search for the 2024 Australian Open winner. | |
| #E1 = Google[2024 Australian Open winner] | |
| #Plan: Retrieve the name of the 2024 Australian Open winner from the search results. | |
| #E2 = LLM[What is the name of the 2024 Australian Open winner, given #E1] | |
| #Plan: Use Google to search for the hometown of the 2024 Australian Open winner. | |
| #E3 = Google[hometown of 2024 Australian Open winner, given #E2] | |
| #Plan: Retrieve the hometown of the 2024 Australian Open winner from the search results. | |
| #E4 = LLM[What is the hometown of the 2024 Australian Open winner, given #E3] | |
| #Planner Node | |
| #To connect the planner to our graph, we will create a get_plan node that accepts the ReWOO state and returns with a state update for the steps and plan_string fields. | |
| import re | |
| from langchain_core.prompts import ChatPromptTemplate | |
# Regex to match expressions of the form E#... = ...[...]
# Capture groups: (plan description, step name "#En", tool name, tool input).
regex_pattern = r"Plan:\s*(.+)\s*(#E\d+)\s*=\s*(\w+)\s*\[([^\]]+)\]"
prompt_template = ChatPromptTemplate.from_messages([("user", prompt)])
# LCEL pipeline: formatted planning prompt -> chat model.
planner = prompt_template | model
def get_plan(state: ReWOO):
    """Planner node: generate a plan for ``state["task"]`` and parse its steps.

    Returns a state update carrying the raw plan text (``plan_string``) and the
    regex-extracted (plan, step_name, tool, tool_input) tuples (``steps``).
    """
    planner_output = planner.invoke({"task": state["task"]})
    plan_text = planner_output.content
    parsed_steps = re.findall(regex_pattern, plan_text)
    return {"steps": parsed_steps, "plan_string": plan_text}
| #Executor | |
| #The executor receives the plan and executes the tools in sequence. | |
| #Below, instantiate the search engine and define the tool execution node. | |
| from langchain_community.tools.tavily_search import TavilySearchResults | |
# Tavily-backed web search used for "Google" steps in the plan.
# NOTE(review): presumably requires a Tavily API key in the environment — confirm.
search = TavilySearchResults()
def _get_current_task(state: ReWOO):
    """Return the 1-based index of the next plan step to execute.

    Yields 1 when no results have been recorded yet, and None once every
    planned step already has a result (execution finished).
    """
    recorded = state.get("results")
    if recorded is None:
        return 1
    if len(recorded) == len(state["steps"]):
        return None
    return len(recorded) + 1
def tool_execution(state: ReWOO):
    """Worker node that executes the next pending tool call of the plan.

    Substitutes earlier evidence values (#E1, #E2, ...) into the tool input,
    dispatches to the Tavily search ("Google") or the chat model ("LLM"), and
    returns a state update mapping the step name to the stringified result.

    Raises:
        ValueError: if the plan names a tool other than "Google" or "LLM".
    """
    _step = _get_current_task(state)
    _, step_name, tool, tool_input = state["steps"][_step - 1]
    _results = (state["results"] or {}) if "results" in state else {}
    # Replace evidence placeholders with the outputs of earlier steps.
    for k, v in _results.items():
        tool_input = tool_input.replace(k, v)
    if tool == "Google":
        result = search.invoke(tool_input)
    elif tool == "LLM":
        result = model.invoke(tool_input)
    else:
        # Name the offending tool instead of raising a bare ValueError.
        raise ValueError(f"Unknown tool in plan step {step_name}: {tool!r}")
    _results[step_name] = str(result)
    return {"results": _results}
| #Solver | |
| #The solver receives the full plan and generates the final response based on the responses of the tool calls from the worker. | |
| solve_prompt = """Solve the following task or problem. To solve the problem, we have made step-by-step Plan and \ | |
| retrieved corresponding Evidence to each Plan. Use them with caution since long evidence might \ | |
| contain irrelevant information. | |
| {plan} | |
| Now solve the question or task according to provided Evidence above. Respond with the answer | |
| directly with no extra words. | |
| Task: {task} | |
| Response:""" | |
def solve(state: ReWOO):
    """Solver node: render the executed plan and ask the model for the answer.

    Builds a transcript of every plan step with evidence placeholders replaced
    by the recorded results, formats it into ``solve_prompt``, and returns the
    model's reply as the ``result`` state field.
    """
    # The recorded results cannot change while rendering, so fetch them once
    # instead of re-reading state on every loop iteration (hoisted invariant).
    _results = (state["results"] or {}) if "results" in state else {}
    plan = ""
    for _plan, step_name, tool, tool_input in state["steps"]:
        # Substitute earlier evidence into both the input and the step label.
        for k, v in _results.items():
            tool_input = tool_input.replace(k, v)
            step_name = step_name.replace(k, v)
        plan += f"Plan: {_plan}\n{step_name} = {tool}[{tool_input}]"
    prompt = solve_prompt.format(plan=plan, task=state["task"])
    result = model.invoke(prompt)
    return {"result": result.content}
| #Define Graph | |
| #Our graph defines the workflow. Each of the planner, tool executor, and solver modules are added as nodes. | |
def _route(state):
    """Conditional edge: loop back to "tool" while steps remain, else "solve"."""
    next_step = _get_current_task(state)
    # None from _get_current_task means every planned step has a result.
    return "solve" if next_step is None else "tool"
| from langgraph.graph import END, StateGraph, START | |
# Wire the ReWOO workflow: START -> plan -> tool -(loop)-> ... -> solve -> END.
graph = StateGraph(ReWOO)
graph.add_node("plan", get_plan)
graph.add_node("tool", tool_execution)
graph.add_node("solve", solve)
graph.add_edge("plan", "tool")
graph.add_edge("solve", END)
# After each tool execution, _route decides: another "tool" pass or "solve".
graph.add_conditional_edges("tool", _route)
graph.add_edge(START, "plan")
app = graph.compile()

# Stream intermediate state updates for the demo task defined above
# (requires OPENAI_API_KEY and working search credentials).
for s in app.stream({"task": task}):
    print(s)
    print("---")