Leon4gr45 committed on
Commit
0b9c746
·
verified ·
1 Parent(s): 5cfd79f

Upload folder using huggingface_hub

Browse files
app.py CHANGED
@@ -13,17 +13,16 @@ print("-----------------------------")
13
  # Ensure the multi_agent_system is in the Python path
14
  sys.path.append(os.getcwd())
15
  from multi_agent_system.orchestrator.agent import main_orchestration_workflow
16
- from multi_agent_system.monitor_agent.agent import monitor_session
17
 
18
- # --- Event 1: Streaming UI Updates (Pure Generator) ---
19
  def stream_orchestrator(msg, history, jules_key, hf_token, blablador_key):
20
  """
21
- A "pure" generator that only handles streaming UI updates.
22
- It returns the data needed for the background task.
23
  """
24
  history.append([msg, ""])
25
- yield history, None # Yield initial history, monitor_data is None for now
26
 
 
27
  api_keys = {
28
  "JULES_API_KEY": os.environ.get("JULES_API_KEY", jules_key),
29
  "HF_TOKEN": os.environ.get("HF_TOKEN", hf_token),
@@ -32,54 +31,24 @@ def stream_orchestrator(msg, history, jules_key, hf_token, blablador_key):
32
  for key, value in api_keys.items():
33
  if not value:
34
  history[-1][1] = f"Error: {key} is not set. Please provide it as a Space secret or in the UI."
35
- yield history, None
36
  return
37
  os.environ[key] = value
38
 
39
  workflow_generator = main_orchestration_workflow()
40
- monitor_data = None
41
  final_bot_message = ""
42
 
43
- try:
44
- while True:
45
- update = next(workflow_generator)
46
- final_bot_message += update
47
- history[-1][1] = final_bot_message
48
- yield history, None
49
- except StopIteration as e:
50
- monitor_data = e.value
51
 
 
52
  for key in api_keys:
53
  if key in os.environ:
54
  del os.environ[key]
55
 
56
- if monitor_data:
57
- history[-1][1] = final_bot_message + "\n\n**Orchestration complete. Starting background monitoring...**"
58
- else:
59
- history[-1][1] = final_bot_message + "\n\n**Workflow finished with an error. Could not start monitor.**"
60
-
61
- yield history, monitor_data
62
-
63
-
64
- # --- Event 2: Background Task (Triggered by .then()) ---
65
- def trigger_background_monitor(monitor_data):
66
- """
67
- Takes the data from the first event and spawns the monitor in a background thread.
68
- This function does not interact with the Gradio UI.
69
- """
70
- if monitor_data:
71
- print("--- Spawning Background Monitor Thread ---")
72
- monitor_thread = threading.Thread(
73
- target=monitor_session,
74
- kwargs=monitor_data,
75
- daemon=True
76
- )
77
- monitor_thread.start()
78
- print("--- Background Monitor Thread Started ---")
79
- else:
80
- print("--- No monitor data provided; skipping background task. ---")
81
- # This function has no UI output, so it returns None implicitly.
82
-
83
  def disable_ui():
84
  return gr.Textbox(interactive=False), gr.Button(interactive=False)
85
 
@@ -91,9 +60,6 @@ with gr.Blocks(title="Desk Agent") as demo:
91
  gr.Markdown("# Desk Agent Orchestrator")
92
  gr.Markdown("A conversational interface to a multi-agent workflow system. Press 'Start Workflow' to begin.")
93
 
94
- # Hidden gr.State component to pass data between events
95
- monitor_data_state = gr.State()
96
-
97
  chatbot = gr.Chatbot(label="Agent Conversation", height=500)
98
  msg_input = gr.Textbox(label="Your Message", placeholder="Press 'Start Workflow' to begin...")
99
 
@@ -106,17 +72,13 @@ with gr.Blocks(title="Desk Agent") as demo:
106
  start_button = gr.Button("Start Workflow", variant="primary")
107
  clear_button = gr.ClearButton([msg_input, chatbot])
108
 
109
- # --- Chained Event Handling (Pattern 1) ---
110
  start_button.click(
111
  fn=disable_ui, outputs=[msg_input, start_button]
112
  ).then(
113
  fn=stream_orchestrator,
114
  inputs=[msg_input, chatbot, jules_key_input, hf_token_input, blablador_key_input],
115
- outputs=[chatbot, monitor_data_state]
116
- ).then(
117
- fn=trigger_background_monitor,
118
- inputs=[monitor_data_state],
119
- outputs=[]
120
  ).then(
121
  fn=enable_ui, outputs=[msg_input, start_button]
122
  )
@@ -126,15 +88,11 @@ with gr.Blocks(title="Desk Agent") as demo:
126
  ).then(
127
  fn=stream_orchestrator,
128
  inputs=[msg_input, chatbot, jules_key_input, hf_token_input, blablador_key_input],
129
- outputs=[chatbot, monitor_data_state]
130
- ).then(
131
- fn=trigger_background_monitor,
132
- inputs=[monitor_data_state],
133
- outputs=[]
134
  ).then(
135
  fn=enable_ui, outputs=[msg_input, start_button]
136
  )
137
 
138
 
139
  if __name__ == "__main__":
140
- demo.launch()
 
13
  # Ensure the multi_agent_system is in the Python path
14
  sys.path.append(os.getcwd())
15
  from multi_agent_system.orchestrator.agent import main_orchestration_workflow
 
16
 
17
+ # --- Streaming UI Updates ---
18
  def stream_orchestrator(msg, history, jules_key, hf_token, blablador_key):
19
  """
20
+ A generator that handles streaming UI updates from the orchestrator workflow.
 
21
  """
22
  history.append([msg, ""])
23
+ yield history
24
 
25
+ # Set API keys from UI or environment variables
26
  api_keys = {
27
  "JULES_API_KEY": os.environ.get("JULES_API_KEY", jules_key),
28
  "HF_TOKEN": os.environ.get("HF_TOKEN", hf_token),
 
31
  for key, value in api_keys.items():
32
  if not value:
33
  history[-1][1] = f"Error: {key} is not set. Please provide it as a Space secret or in the UI."
34
+ yield history
35
  return
36
  os.environ[key] = value
37
 
38
  workflow_generator = main_orchestration_workflow()
 
39
  final_bot_message = ""
40
 
41
+ # Stream updates from the workflow
42
+ for update in workflow_generator:
43
+ final_bot_message += update
44
+ history[-1][1] = final_bot_message
45
+ yield history
 
 
 
46
 
47
+ # Clean up environment variables
48
  for key in api_keys:
49
  if key in os.environ:
50
  del os.environ[key]
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
def disable_ui():
    """Lock the message box and start button while a workflow is running."""
    locked_input = gr.Textbox(interactive=False)
    locked_button = gr.Button(interactive=False)
    return locked_input, locked_button
54
 
 
60
  gr.Markdown("# Desk Agent Orchestrator")
61
  gr.Markdown("A conversational interface to a multi-agent workflow system. Press 'Start Workflow' to begin.")
62
 
 
 
 
63
  chatbot = gr.Chatbot(label="Agent Conversation", height=500)
64
  msg_input = gr.Textbox(label="Your Message", placeholder="Press 'Start Workflow' to begin...")
65
 
 
72
  start_button = gr.Button("Start Workflow", variant="primary")
73
  clear_button = gr.ClearButton([msg_input, chatbot])
74
 
75
+ # --- Event Handling ---
76
  start_button.click(
77
  fn=disable_ui, outputs=[msg_input, start_button]
78
  ).then(
79
  fn=stream_orchestrator,
80
  inputs=[msg_input, chatbot, jules_key_input, hf_token_input, blablador_key_input],
81
+ outputs=[chatbot]
 
 
 
 
82
  ).then(
83
  fn=enable_ui, outputs=[msg_input, start_button]
84
  )
 
88
  ).then(
89
  fn=stream_orchestrator,
90
  inputs=[msg_input, chatbot, jules_key_input, hf_token_input, blablador_key_input],
91
+ outputs=[chatbot]
 
 
 
 
92
  ).then(
93
  fn=enable_ui, outputs=[msg_input, start_button]
94
  )
95
 
96
 
97
  if __name__ == "__main__":
98
+ demo.launch(share=True)
multi_agent_system/jules_agent_client/stream_client.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+
4
JULES_API_ENDPOINT = "https://jules.googleapis.com/v1alpha"


class JulesAgentStream:
    """Iterable client that consumes server-sent events from a Jules session."""

    def __init__(self, api_key, session_id):
        """
        Set up the stream client.

        Args:
            api_key (str): The Jules API key for authentication.
            session_id (str): The ID of the session to stream.
        """
        self.api_key = api_key
        self.session_id = session_id
        self.url = f'{JULES_API_ENDPOINT}/sessions/{self.session_id}:stream'
        self.headers = {
            'X-Goog-Api-Key': self.api_key,
            'Accept': 'text/event-stream'
        }

    def __iter__(self):
        """
        Yield parsed event dicts from the Jules API stream.

        On a JSON parse failure the raw line is surfaced as an error dict;
        on a transport failure a single error dict is yielded and the
        iteration ends.
        """
        prefix = 'data: '
        try:
            response = requests.get(
                self.url, headers=self.headers, stream=True, timeout=300
            )
            with response:
                response.raise_for_status()
                for raw_line in response.iter_lines():
                    if not raw_line:
                        continue
                    text = raw_line.decode('utf-8')
                    if not text.startswith(prefix):
                        continue
                    payload = text[len(prefix):]
                    try:
                        yield json.loads(payload)
                    except json.JSONDecodeError:
                        # Surface unparseable payloads instead of dropping them.
                        yield {"error": "JSONDecodeError", "raw_data": text}
        except requests.exceptions.RequestException as e:
            yield {"error": f"RequestException: {str(e)}"}
43
+
44
if __name__ == '__main__':
    import os

    # Example usage: stream a live Jules session to stdout.
    # Requires the JULES_API_KEY and JULES_SESSION_ID environment variables.
    key = os.environ.get("JULES_API_KEY")
    sid = os.environ.get("JULES_SESSION_ID")

    if key and sid:
        print(f"--- Starting to stream session: {sid} ---")
        for event in JulesAgentStream(api_key=key, session_id=sid):
            print(json.dumps(event, indent=2))
    else:
        print("Error: JULES_API_KEY and JULES_SESSION_ID environment variables must be set.")
multi_agent_system/orchestrator/agent.py CHANGED
@@ -6,7 +6,8 @@ import sys
6
  # Add sibling directories to the Python path
7
  sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
8
 
9
- from jules_agent_client.agent import create_session, list_sources, send_message_to_session
 
10
  from llm_client import get_llm_response_stream
11
 
12
  # Constants
@@ -47,8 +48,7 @@ def generate_conversational_update(action_description):
47
 
48
  def main_orchestration_workflow():
49
  """
50
- A generator function that executes the main workflow, yields conversational updates,
51
- and returns the necessary data to start the monitor.
52
  """
53
  yield generate_conversational_update("Starting the orchestration workflow and checking for API keys.")
54
 
@@ -115,18 +115,20 @@ def main_orchestration_workflow():
115
  session_id = session_info['name'].split('/')[-1]
116
  yield generate_conversational_update(f"Success! The coding session with Jules is active (ID: {session_id}).")
117
 
118
- # Step 3: Prepare for monitoring
119
- hf_repo_to_monitor = plan_data.get("hf_repo")
120
- if not hf_repo_to_monitor:
121
- yield "The developer's plan was missing the Hugging Face repository to monitor."
122
- return None
123
 
124
- yield generate_conversational_update(f"I'm now handing off to the Monitor Agent to watch the deployment at '{hf_repo_to_monitor}'.")
 
 
 
 
 
 
125
 
126
- # Return the necessary data for the monitor to be started in a separate thread
127
- return {
128
- "session_id": session_id,
129
- "hf_repo_id": hf_repo_to_monitor,
130
- "jules_api_key": jules_api_key,
131
- "hf_token": hf_token
132
- }
 
6
  # Add sibling directories to the Python path
7
  sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
8
 
9
+ from jules_agent_client.agent import create_session, list_sources
10
+ from jules_agent_client.stream_client import JulesAgentStream
11
  from llm_client import get_llm_response_stream
12
 
13
  # Constants
 
48
 
49
  def main_orchestration_workflow():
50
  """
51
+ A generator function that executes the main workflow and yields conversational updates.
 
52
  """
53
  yield generate_conversational_update("Starting the orchestration workflow and checking for API keys.")
54
 
 
115
  session_id = session_info['name'].split('/')[-1]
116
  yield generate_conversational_update(f"Success! The coding session with Jules is active (ID: {session_id}).")
117
 
118
+ yield generate_conversational_update(f"I'm now streaming live updates from the Jules session...")
 
 
 
 
119
 
120
+ # Step 3: Stream Jules' activities
121
+ stream_client = JulesAgentStream(api_key=jules_api_key, session_id=session_id)
122
+ for event in stream_client:
123
+ # Here you can parse the event and format it into a user-friendly message
124
+ # For now, we'll just yield the raw event as a string.
125
+ # A more sophisticated implementation would parse the event type and content.
126
+ yield f"\n\n**Jules Update:**\n```json\n{json.dumps(event, indent=2)}\n```"
127
 
128
+ # The workflow concludes after the stream ends.
129
+ # We can still use the hf_repo for a final message.
130
+ hf_repo_to_monitor = plan_data.get("hf_repo")
131
+ if hf_repo_to_monitor:
132
+ yield f"\n\n**Workflow complete.** You can check the final deployment at '{hf_repo_to_monitor}'."
133
+ else:
134
+ yield "\n\n**Workflow complete.**"
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
- gradio
 
2
  huggingface-hub==0.21.4
3
  requests
4
  openai
 
1
+ gradio==4.19.0
2
+ gradio_client==0.10.0
3
  huggingface-hub==0.21.4
4
  requests
5
  openai