AliA1997 commited on
Commit
dd75c3c
·
1 Parent(s): 497d544

Simplified langgraph package.

Browse files
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ SPACE_ID=
__pycache__/init_agent.cpython-313.pyc ADDED
Binary file (4.88 kB). View file
 
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import os
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  from typing import Optional, Any
6
  import pandas as pd
7
  from init_agent import build_workflow
@@ -14,22 +13,18 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
14
  # --- Basic Agent Definition ---
15
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
16
  class BasicAgent:
 
17
  workflow: Optional[Any]
18
  def __init__(self):
19
  print("BasicAgent initialized.")
20
  self.workflow = build_workflow()
 
21
  def __call__(self, question: str) -> str:
22
  print(f"Agent received question (first 50 chars): {question[:50]}...")
23
- # fixed_answer = "This is a default answer."
24
- # print(f"Agent returning fixed answer: {fixed_answer}")
25
- workflow_response = self.workflow.invoke({
26
- "messages": [
27
- HumanMessage(content="What does this code do?: var a = 10; var b = 20;")
28
- ],
29
- "classification": "not coding",
30
- "ai_agent": None
31
- })
32
- return workflow_response["messages"][-1].content
33
 
34
  def run_and_submit_all( profile: gr.OAuthProfile | None):
35
  """
 
1
  import os
2
  import gradio as gr
3
  import requests
 
4
  from typing import Optional, Any
5
  import pandas as pd
6
  from init_agent import build_workflow
 
13
  # --- Basic Agent Definition ---
14
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
15
class BasicAgent:
    """A LangGraph agent: each question is answered by a compiled workflow."""

    # Compiled graph produced by build_workflow(); set once in __init__.
    workflow: Optional[Any]

    def __init__(self):
        print("BasicAgent initialized.")
        self.workflow = build_workflow()

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # BUG FIX: AgentState declares ai_agent and classification in addition
        # to messages, and the classify node reads state["ai_agent"] — seed the
        # full initial state so the first node cannot hit a missing key.
        # NOTE(review): HumanMessage is not imported in the visible part of
        # app.py — confirm it is imported further down the import block.
        result = self.workflow.invoke({
            "messages": [HumanMessage(content=question)],
            "classification": "not coding",
            "ai_agent": None,
        })
        # The last message in the final state is the answer; no slicing of the
        # text is needed.
        return result["messages"][-1].content
 
 
 
 
 
 
28
 
29
  def run_and_submit_all( profile: gr.OAuthProfile | None):
30
  """
init_agent.py CHANGED
@@ -1,134 +1,148 @@
1
  import os
2
  from transformers import pipeline
3
- from huggingface_hub import login
4
  from typing import Annotated, TypedDict, Optional, Any
 
5
  from langgraph.graph import StateGraph, START, END
6
  from langgraph.graph.message import add_messages
7
- from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
8
 
9
- from langchain_core.messages import AnyMessage, HumanMessage
 
10
  from langchain_community.tools import DuckDuckGoSearchRun
11
-
12
  from langchain_core.tools import Tool
13
 
14
- hf_token = os.environ.get('HF_TOKEN')
15
- if hf_token:
16
- login(token=hf_token)
17
-
18
- def init_classifier():
19
- classifier = pipeline("zero-shot-classification", model='cross-encoder/nli-distilroberta-base')
20
- return classifier
21
 
22
 
23
- class CurrentAgent():
24
- current_llm: HuggingFaceEndpoint
25
- current_chat: ChatHuggingFace
26
- def __init__(self):
27
- self.current_llm = HuggingFaceEndpoint(
28
- repo_id="Qwen/Qwen3-VL-8B-Instruct",
29
- huggingfacehub_api_token=hf_token
30
  )
31
- self.current_chat = ChatHuggingFace(llm=self.current_llm, verbose=True, tools=[DuckDuckGoSearchRun()])
32
 
33
- def update_llm(self, model_id: str, hf_token):
34
- self.current_llm = HuggingFaceEndpoint(
35
- repo_id=model_id,
 
 
 
 
 
36
  huggingfacehub_api_token=hf_token
37
  )
38
- self.current_chat = ChatHuggingFace(llm=self.current_llm, verbose=True, tools=[DuckDuckGoSearchRun()])
39
-
40
-
41
- # Define a custom agent state:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  class AgentState(TypedDict):
43
- ai_agent: Optional[CurrentAgent]
44
- classification: str
45
- messages: Annotated[list[AnyMessage], add_messages]
46
-
47
-
48
- tools = [DuckDuckGoSearchRun()]
49
 
50
 
 
 
 
51
  def classify(state: AgentState) -> AgentState:
52
- classifier = init_classifier()
53
- message_to_send = state['messages'][-1].content
54
- candidate_labels = ["coding", "not coding"]
55
- classifier_res = classifier(message_to_send, candidate_labels)
56
- highest_score_label = classifier_res['labels'][0]
57
- highest_score = classifier_res['scores'][0]
58
 
59
- new_classification = 'not coding'
60
 
61
- if(state['ai_agent'] is None):
62
- state['ai_agent'] = CurrentAgent()
63
 
64
- if(highest_score_label == 'coding' and highest_score > 0.6):
65
- new_classification = 'coding'
 
 
 
66
 
67
- return {
68
- "ai_agent": state['ai_agent'],
69
- "classification": new_classification,
70
- "messages": state['messages']
71
- }
72
 
 
 
 
73
  def general_assistant(state: AgentState) -> AgentState:
74
- if(state['ai_agent'] is None):
75
- state['ai_agent'] = CurrentAgent()
76
 
77
- updated_messages = [
78
- state['ai_agent'].current_chat.invoke(state['messages'])
79
- ]
 
 
 
 
80
 
81
- return {
82
- "ai_agent": state['ai_agent'],
83
- "classification": state['classification'],
84
- "messages": updated_messages
85
- }
86
 
 
 
 
87
  def code_assistant(state: AgentState) -> AgentState:
88
- if(state['ai_agent'] is None):
89
- state['ai_agent'] = CurrentAgent()
90
 
91
- state['ai_agent'].update_llm('Qwen/Qwen2.5-Coder-32B-Instruct', hf_token)
92
- updated_messages = [
93
- state['ai_agent'].current_chat.invoke(state['messages'])
94
- ]
95
 
96
- return {
97
- "ai_agent": state['ai_agent'],
98
- "classification": state['classification'],
99
- "messages": updated_messages
100
- }
101
 
102
- def route(state: AgentState):
103
- mode = state['classification']
104
- if mode == "coding":
105
- return "code_assistant"
106
- else:
107
- return "general_assistant"
108
 
 
 
 
 
 
109
 
110
 
 
 
 
111
  def build_workflow() -> Any:
112
- graph_builder = StateGraph(AgentState)
113
- # Define the nodes:
114
- graph_builder.add_node("classify", classify)
115
- graph_builder.add_node("general_assistant", general_assistant)
116
- graph_builder.add_node("code_assistant", code_assistant)
117
-
118
-
119
- ### Define Edges
120
- # The start node, just return the result using chat api with current messages state.
121
- graph_builder.add_edge(START, "classify")
122
- # Add a conditional edge
123
- graph_builder.add_conditional_edges(
124
- "classify",
125
- route,
126
- {
127
- "general_assistant": "general_assistant",
128
- "code_assistant": "code_assistant"
129
- }
130
- )
131
 
132
- graph_builder.add_edge("general_assistant", END)
133
- graph_builder.add_edge("code_assistant", END)
134
- return graph_builder.compile()
 
1
  import os
2
  from transformers import pipeline
 
3
  from typing import Annotated, TypedDict, Optional, Any
4
+
5
  from langgraph.graph import StateGraph, START, END
6
  from langgraph.graph.message import add_messages
 
7
 
8
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
9
+ from langchain_core.messages import AnyMessage
10
  from langchain_community.tools import DuckDuckGoSearchRun
 
11
  from langchain_core.tools import Tool
12
 
13
# Hugging Face API token used by all endpoint calls below; None if HF_TOKEN
# is not set in the environment.
hf_token = os.environ.get("HF_TOKEN")
 
 
 
 
 
 
14
 
15
 
16
# -----------------------------
# CLASSIFIER
# -----------------------------
# Cache: loading the zero-shot model is expensive, and classify() calls
# init_classifier() on every workflow invocation — build it once and reuse it.
_classifier = None


def init_classifier():
    """Return the (cached) zero-shot classifier used to route questions."""
    global _classifier
    if _classifier is None:
        _classifier = pipeline(
            "zero-shot-classification",
            model="cross-encoder/nli-distilroberta-base"
        )
    return _classifier
 
24
 
25
+
26
# -----------------------------
# CODE LLM TOOL
# -----------------------------
def run_code_llm(prompt: str) -> str:
    """Invoke the dedicated coder model once and return its reply text."""
    endpoint = HuggingFaceEndpoint(
        repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        huggingfacehub_api_token=hf_token,
    )
    coder_chat = ChatHuggingFace(llm=endpoint, verbose=True)
    reply = coder_chat.invoke([{"role": "user", "content": prompt}])
    return reply.content


# Exposed as a LangChain tool so the general chat model can delegate
# programming questions to the coder model on its own.
code_llm_tool = Tool(
    name="code_llm",
    description="Use this tool to answer coding or programming questions.",
    func=run_code_llm,
)
45
+
46
+
47
# -----------------------------
# AGENT WRAPPER
# -----------------------------
class CurrentAgent:
    """Bundles the general-purpose chat model together with its tools."""

    def __init__(self):
        # General-purpose endpoint; coding questions can be delegated to the
        # code_llm tool by the chat model itself.
        llm = HuggingFaceEndpoint(
            repo_id="Qwen/Qwen3-VL-8B-Instruct",
            huggingfacehub_api_token=hf_token,
        )
        self.current_llm = llm
        self.current_chat = ChatHuggingFace(
            llm=llm,
            verbose=True,
            tools=[DuckDuckGoSearchRun(), code_llm_tool],
        )
61
+
62
+
63
# -----------------------------
# STATE
# -----------------------------
class AgentState(TypedDict):
    """Shared LangGraph state passed between workflow nodes."""
    # Lazily-created model wrapper; nodes instantiate it when it is None.
    ai_agent: Optional[CurrentAgent]
    # Routing label produced by classify(): "coding" or "not coding".
    classification: str
    # Conversation history; add_messages merges each node's output list.
    messages: Annotated[list[AnyMessage], add_messages]
 
 
 
70
 
71
 
72
# -----------------------------
# CLASSIFICATION NODE
# -----------------------------
def classify(state: AgentState) -> AgentState:
    """Label the latest message as "coding"/"not coding" and ensure an agent.

    Returns the state with the same messages, a fresh classification, and a
    guaranteed non-None ai_agent.
    """
    classifier = init_classifier()
    message = state["messages"][-1].content

    result = classifier(message, ["coding", "not coding"])
    label = result["labels"][0]
    score = result["scores"][0]

    # Require a confident "coding" score; otherwise default to "not coding".
    new_class = "coding" if (label == "coding" and score > 0.6) else "not coding"

    # BUG FIX: callers may invoke the workflow with only {"messages": ...}
    # (see app.py), so "ai_agent" can be missing entirely, not just None —
    # .get() avoids a KeyError in both cases.
    agent = state.get("ai_agent") or CurrentAgent()

    return {
        "ai_agent": agent,
        "classification": new_class,
        "messages": state["messages"]
    }
93
 
 
 
 
 
 
94
 
95
# -----------------------------
# GENERAL ASSISTANT NODE
# -----------------------------
def general_assistant(state: AgentState) -> AgentState:
    """Answer the conversation with the general chat model (tools enabled).

    The chat model has DuckDuckGo search and the code_llm tool available.
    """
    # Robustness: "ai_agent" may be missing (not just None) if this node is
    # reached without classify() seeding it — .get() avoids a KeyError.
    agent = state.get("ai_agent") or CurrentAgent()

    updated = [agent.current_chat.invoke(state["messages"])]

    return {
        "ai_agent": agent,
        "classification": state["classification"],
        "messages": updated
    }
109
 
 
 
 
 
 
110
 
111
# -----------------------------
# CODE ASSISTANT NODE
# -----------------------------
def code_assistant(state: AgentState) -> AgentState:
    """Answer with the chat model, relying on its code_llm tool for code.

    NOTE(review): build_workflow() no longer registers this node (the graph
    only wires classify -> general_assistant), so it is currently unused.
    """
    # Robustness: "ai_agent" may be missing (not just None) when the state was
    # seeded with only "messages" — .get() avoids a KeyError.
    agent = state.get("ai_agent") or CurrentAgent()

    # The agent will automatically call the code_llm tool
    updated = [agent.current_chat.invoke(state["messages"])]

    return {
        "ai_agent": agent,
        "classification": state["classification"],
        "messages": updated
    }
126
 
 
 
 
 
 
 
127
 
128
# -----------------------------
# ROUTER
# -----------------------------
def route(state: AgentState):
    """Pick the next node name from the current classification label.

    NOTE(review): build_workflow() does not wire this router in (the graph
    goes classify -> general_assistant unconditionally), so it is unused.
    """
    if state["classification"] == "coding":
        return "code_assistant"
    return "general_assistant"
133
 
134
 
135
# -----------------------------
# WORKFLOW
# -----------------------------
def build_workflow() -> Any:
    """Compile the two-node graph: classify, then the general assistant.

    Coding questions are handled inside general_assistant via the chat
    model's code_llm tool, so no conditional routing is wired here.
    """
    builder = StateGraph(AgentState)

    # Nodes
    builder.add_node("classify", classify)
    builder.add_node("general_assistant", general_assistant)

    # Linear flow: START -> classify -> general_assistant -> END
    builder.add_edge(START, "classify")
    builder.add_edge("classify", "general_assistant")
    builder.add_edge("general_assistant", END)

    return builder.compile()
 
 
metadata copy.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
metadata.jsonl CHANGED
The diff for this file is too large to render. See raw diff