Update app_langgraph.py
app_langgraph.py +15 -4
CHANGED
@@ -4,7 +4,7 @@ from dotenv import load_dotenv
 from langgraph.graph import START, StateGraph, MessagesState
 from langgraph.prebuilt import tools_condition
 from langgraph.prebuilt import ToolNode
-
+from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace, HuggingFaceEmbeddings
 from langchain_core.messages import SystemMessage, HumanMessage
 from langchain_core.globals import set_debug
 from langchain_groq import ChatGroq
@@ -15,6 +15,7 @@ from tools.math_tools import multiply, add, subtract, divide
 # from langchain_community.vectorstores import SupabaseVectorStore
 import json
 from tools.multimodal_tools import extract_text, analyze_image_tool, analyze_audio_tool
+from langchain_google_genai import ChatGoogleGenerativeAI
 
 # set_debug(True)
 load_dotenv()
@@ -34,6 +35,7 @@ tools = [
 
 def build_graph():
     hf_token = os.getenv("HF_TOKEN")
+    api_key = os.getenv("GEMINI_API_KEY")
     # llm = HuggingFaceEndpoint(
     #     repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
     #     huggingfacehub_api_token=hf_token,
@@ -42,15 +44,24 @@
     # chat = ChatHuggingFace(llm=llm, verbose=True)
     # llm_with_tools = chat.bind_tools(tools)
 
-    llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
-    llm_with_tools = llm.bind_tools(tools)
+    # llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
+    # llm_with_tools = llm.bind_tools(tools)
+
+    chat = ChatGoogleGenerativeAI(
+        model="gemini-2.5-pro-preview-05-06",
+        temperature=0,
+        max_retries=2,
+        google_api_key=api_key,
+        thinking_budget=0
+    )
+    chat_with_tools = chat.bind_tools(tools)
 
     def assistant(state: MessagesState):
         sys_msg = "You are a helpful assistant with access to tools. Understand user requests accurately. Use your tools when needed to answer effectively. Strictly follow all user instructions and constraints." \
             "Pay attention: your output needs to contain only the final answer without any reasoning since it will be strictly evaluated against a dataset which contains only the specific response." \
             "Your final output needs to be just the string or integer containing the answer, not an array or technical stuff."
         return {
-            "messages": [llm_with_tools.invoke([sys_msg] + state["messages"])],
+            "messages": [chat_with_tools.invoke([sys_msg] + state["messages"])],
         }
 
     ## The graph
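
A quick way to sanity-check the model swap in isolation, before running the full agent: the sketch below is not part of the commit. It assumes GEMINI_API_KEY is set as a Space secret or in .env, and it leaves out thinking_budget, since support for that parameter depends on the installed langchain-google-genai version.

import os
from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI

load_dotenv()

# Same configuration as the commit, minus thinking_budget (see note above).
chat = ChatGoogleGenerativeAI(
    model="gemini-2.5-pro-preview-05-06",
    temperature=0,          # deterministic answers for strict evaluation
    max_retries=2,          # retry transient API errors
    google_api_key=os.getenv("GEMINI_API_KEY"),
)

# Expect a plain "pong" back if the key and model name are valid.
print(chat.invoke("Reply with the single word: pong").content)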
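
One side note on the assistant node: sys_msg is a plain str, and LangChain coerces a bare string inside a message list to a human message, not a system message. Since the file already imports SystemMessage, a possible variant (not what this commit does) would wrap it explicitly:

# Hypothetical variant: give the instructions system-role semantics.
return {
    "messages": [chat_with_tools.invoke([SystemMessage(content=sys_msg)] + state["messages"])],
}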
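
The diff ends at the ## The graph marker, so the wiring itself is outside this hunk. Judging by the imports at the top of the file (START, StateGraph, MessagesState, tools_condition, ToolNode), build_graph() presumably finishes with the standard LangGraph tool-calling loop, roughly:

# A minimal sketch assuming the code below the marker follows LangGraph's
# prebuilt tool loop; the actual code is not shown in this diff.
builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)                     # LLM node defined above
builder.add_node("tools", ToolNode(tools))                   # runs any requested tool
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)  # route to "tools" or END
builder.add_edge("tools", "assistant")                       # feed tool results back
return builder.compile()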