suzannejin committed on
Commit
80f7bc0
·
unverified ·
2 Parent(s): 50269b68fa9d42

Merge pull request #6 from mathysgrapotte/modal

Browse files
Files changed (3) hide show
  1. main.py +86 -42
  2. modal.py +0 -89
  3. tools/meta_yml_tools.py +17 -0
main.py CHANGED
@@ -2,25 +2,42 @@ from smolagents import CodeAgent, LiteLLMModel
2
  from smolagents.tools import ToolCollection
3
  import gradio as gr
4
  import requests
5
- from tools.meta_yml_tools import get_meta_yml_file, extract_tools_from_meta_json, extract_information_from_meta_json, extract_module_name_description, get_biotools_response
 
 
 
 
6
 
7
- def main(module_name):
8
- meta_yml = get_meta_yml_file(module_name=module_name)
9
- module_info = extract_module_name_description(meta_file=meta_yml)
10
- module_tools = extract_tools_from_meta_json(meta_file=meta_yml)
11
- # TODO: agent to choose the right tool
12
- # Only call the agent if there is more than one tool, otherwise get the first name
13
- first_prompt = f"""
14
- The module {module_info[0]} with desciption '{module_info[1]}' contains a series of tools.
15
- Find the tool that best describes the module. Return only one tool. Return the name.
16
- This is the list of tools:
17
- {"\n\t".join(f"{tool[0]}: {tool[1]}" for tool in module_tools)}
18
- """
19
- tool_name = "fastqc" # this would be the answer of the first agent
20
- meta_info = extract_information_from_meta_json(meta_file=meta_yml, tool_name=tool_name)
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  def chat_with_agent(message, history):
23
- """Initialize MCP client for each request to avoid connection issues"""
 
 
24
  try:
25
  with ToolCollection.from_mcp(
26
  {"url": "https://notredameslab-nf-ontology.hf.space/gradio_api/mcp/sse", "transport": "sse"},
@@ -50,41 +67,68 @@ def chat_with_agent(message, history):
50
 
51
  except Exception as e:
52
  return f"❌ Error: {e}\nType: {type(e).__name__}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- # TODO: placeholder function
55
- def fetch_meta_yml(module_name):
56
- # Adjust the URL or path to your actual source of nf-core modules
57
- base_url = f"https://raw.githubusercontent.com/nf-core/modules/refs/heads/master/modules/nf-core/{module_name}/meta.yml"
58
- try:
59
- response = requests.get(base_url)
60
- response.raise_for_status()
61
- content = response.text
62
-
63
- # Save for download
64
- with open("meta.yml", "w") as f:
65
- f.write(content)
66
-
67
- return content, "meta.yml"
68
- except Exception as e:
69
- return f"Error: Could not retrieve meta.yml for module '{module_name}'\n{e}", None
70
-
71
- if __name__ == "__main__":
72
  with gr.Blocks() as demo:
73
  gr.Markdown("### 🔍 Update an nf-core module `meta.yml` file by adding EDAM ontology terms.")
74
 
75
- with gr.Row():
76
- module_input = gr.Textbox(label="nf-core Module Name", placeholder="e.g. fastqc")
77
 
 
78
  fetch_btn = gr.Button("Update meta.yml")
79
 
80
- with gr.Row():
81
- meta_output = gr.Textbox(label="meta.yml content", lines=20)
82
- download_button = gr.File(label="Download meta.yml")
83
 
 
84
  fetch_btn.click(
85
- fn=fetch_meta_yml, # TODO: change to final function
86
  inputs=module_input,
87
- outputs=[meta_output, download_button]
88
  )
 
 
89
 
90
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from smolagents.tools import ToolCollection
3
  import gradio as gr
4
  import requests
5
+ import modal
6
+ import sys
7
+ import subprocess
8
+ import time
9
+ from .tools.meta_yml_tools import fetch_meta_yml,get_meta_yml_file, extract_tools_from_meta_json, extract_information_from_meta_json, extract_module_name_description,get_biotools_response
10
 
11
+ # Define the custom image
12
+ ollama_image = (
13
+ modal.Image.debian_slim()
14
+ .apt_install("curl", "gnupg", "software-properties-common", "procps")
15
+ .run_commands("curl -fsSL https://ollama.com/install.sh | sh")
16
+ .run_commands(
17
+ "bash -c 'ollama serve >/dev/null 2>&1 & "
18
+ "PID=$!; "
19
+ "sleep 10 && "
20
+ "ollama pull devstral:latest && "
21
+ "ollama pull qwen3:0.6b && "
22
+ "kill $PID'"
23
+ )
24
+ .pip_install(
25
+ "fastmcp>=2.6.1",
26
+ "gradio[mcp]>=5.0.0",
27
+ "huggingface_hub[mcp]>=0.32.2",
28
+ "mcp>=1.9.2",
29
+ "smolagents[litellm,mcp]>=1.17.0",
30
+ "textblob>=0.19.0",
31
+ )
32
+ )
33
+
34
+ # Initialize the Modal app with the custom image
35
+ app = modal.App("agent-ontology", image=ollama_image)
36
 
37
  def chat_with_agent(message, history):
38
+ """ Function to handle chat messages and interact with the agent.
39
+ This function creates a new MCP connection for each request, allowing the agent to use tools from the MCP server.
40
+ """
41
  try:
42
  with ToolCollection.from_mcp(
43
  {"url": "https://notredameslab-nf-ontology.hf.space/gradio_api/mcp/sse", "transport": "sse"},
 
67
 
68
  except Exception as e:
69
  return f"❌ Error: {e}\nType: {type(e).__name__}"
70
+
71
+ def run_multi_agent(module_name):
72
+ meta_yml = get_meta_yml_file(module_name=module_name)
73
+ module_info = extract_module_name_description(meta_file=meta_yml)
74
+ module_tools = extract_tools_from_meta_json(meta_file=meta_yml)
75
+ # TODO: agent to choose the right tool
76
+ # Only call the agent if there is more than one tool, otherwise get the first name
77
+ first_prompt = f"""
78
+ The module {module_info[0]} with description '{module_info[1]}' contains a series of tools.
79
+ Find the tool that best describes the module. Return only one tool. Return the name.
80
+ This is the list of tools:
81
+ {"\n\t".join(f"{tool[0]}: {tool[1]}" for tool in module_tools)}
82
+ """
83
+ tool_name = "fastqc" # this would be the answer of the first agent
84
+ meta_info = extract_information_from_meta_json(meta_file=meta_yml, tool_name=tool_name)
85
+ return(meta_info)
86
 
87
+ def run_interface():
88
+ """ Function to run the agent with a Gradio interface.
89
+ This function sets up the Gradio interface and launches it.
90
+ """
91
+ # create the Gradio interface
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  with gr.Blocks() as demo:
93
  gr.Markdown("### 🔍 Update an nf-core module `meta.yml` file by adding EDAM ontology terms.")
94
 
95
+ # create the input textbox for the nf-core module name
96
+ module_input = gr.Textbox(label="nf-core Module Name", placeholder="e.g. fastqc")
97
 
98
+ # create the button to fetch the meta.yml file
99
  fetch_btn = gr.Button("Update meta.yml")
100
 
101
+ # create the output textbox for the meta.yml content and a download button
102
+ meta_output = gr.Textbox(label="meta.yml content", lines=20)
103
+ download_button = gr.File(label="Download meta.yml")
104
 
105
+ # set the function to run when the button is clicked
106
  fetch_btn.click(
107
+ fn=run_multi_agent, # TODO: change to final function
108
  inputs=module_input,
109
+ outputs=[meta_output]
110
  )
111
+
112
+ demo.launch(share=True)
113
 
114
+ @app.function(keep_warm=1, gpu="A10G", timeout=2400)
115
+ def main_remote():
116
+ # spin up Ollama daemon in the background
117
+ server = subprocess.Popen(["ollama", "serve"])
118
+ time.sleep(6) # give it a moment to bind :11434
119
+ try:
120
+ run_interface()
121
+ finally:
122
+ server.terminate()
123
+
124
+ def main_local():
125
+ run_interface()
126
+
127
+ if __name__ == "__main__":
128
+ # check if it is modal running the script or python running the script
129
+ # if it is modal, run the remote function
130
+ # if it is python, run the local function
131
+ if "modal" in sys.modules:
132
+ main_remote().remote()
133
+ else:
134
+ main_local()
modal.py DELETED
@@ -1,89 +0,0 @@
1
- from smolagents import CodeAgent, LiteLLMModel
2
- from smolagents.tools import ToolCollection
3
- import gradio as gr
4
- import modal
5
- import subprocess
6
- import time
7
-
8
- app = modal.App("agent-ontology")
9
-
10
- OLLAMA_IMAGE = (
11
- modal.Image.debian_slim()
12
- # pkill/pgrep come from procps
13
- .apt_install(
14
- "curl", "gnupg", "software-properties-common",
15
- "procps" # ← adds pkill, pgrep, ps …
16
- )
17
- # install Ollama
18
- .run_commands("curl -fsSL https://ollama.com/install.sh | sh")
19
- # spin up daemon, pull the model, shut daemon down
20
- .run_commands(
21
- "bash -c 'ollama serve >/dev/null 2>&1 & "
22
- "PID=$!; "
23
- "sleep 10 && "
24
- "ollama pull devstral:latest && "
25
- "kill $PID'"
26
- )
27
- # python deps
28
- .pip_install(
29
- "fastmcp>=2.6.1",
30
- "gradio[mcp]>=5.0.0",
31
- "huggingface_hub[mcp]>=0.32.2",
32
- "mcp>=1.9.2",
33
- "smolagents[litellm,mcp]>=1.17.0",
34
- "textblob>=0.19.0",
35
- )
36
- )
37
-
38
- # Specify the dependencies in the Modal function
39
- @app.function(image=OLLAMA_IMAGE, gpu="A10G", timeout=2400)
40
- def run_agent():
41
-
42
- def chat_with_agent(message, history):
43
- """Initialize MCP client for each request to avoid connection issues"""
44
- try:
45
- with ToolCollection.from_mcp(
46
- {"url": "https://notredameslab-nf-ontology.hf.space/gradio_api/mcp/sse", "transport": "sse"},
47
- trust_remote_code=True # Acknowledge that we trust this remote MCP server
48
- ) as tool_collection:
49
-
50
- model = LiteLLMModel(
51
- model_id="ollama/devstral:latest",
52
- api_base="http://localhost:11434",
53
- )
54
-
55
- agent = CodeAgent(
56
- tools=tool_collection.tools,
57
- model=model,
58
- additional_authorized_imports=["inspect", "json"]
59
- )
60
-
61
- additional_instructions = """
62
- ADDITIONAL IMPORTANT INSTRUCTIONS:
63
- use the tool "final_answer" in the code block to provide the answer to the user. Prints are only for debugging purposes. So, to give your results concatenate everything you want to print in a single "final_answer" call as such : final_answer(f"your answer here").
64
- """
65
-
66
- agent.system_prompt += additional_instructions
67
-
68
- result = agent.run(message)
69
- return str(result)
70
-
71
- except Exception as e:
72
- return f"❌ Error: {e}\nType: {type(e).__name__}"
73
-
74
- demo = gr.ChatInterface(
75
- fn=chat_with_agent,
76
- type="messages",
77
- examples=["can you extract input/output metadata from fastqc nf-core module ?"],
78
- title="Agent with MCP Tools (Per-Request Connection)",
79
- description="This version creates a new MCP connection for each request."
80
- )
81
- demo.launch(share=True)
82
-
83
- @app.local_entrypoint()
84
- def main():
85
- """Run the Modal app locally."""
86
- run_agent.remote()
87
-
88
- if __name__ == "__main__":
89
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tools/meta_yml_tools.py CHANGED
@@ -2,6 +2,23 @@ import requests
2
  import json
3
  import yaml
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  def get_meta_yml_file(module_name: str) -> dict:
6
  """
7
  Access the nf-core/modules repository and return the meta.yml file of the given module.
 
2
  import json
3
  import yaml
4
 
5
+ # TODO: placeholder function
6
+ def fetch_meta_yml(module_name):
7
+ # Adjust the URL or path to your actual source of nf-core modules
8
+ base_url = f"https://raw.githubusercontent.com/nf-core/modules/refs/heads/master/modules/nf-core/{module_name}/meta.yml"
9
+ try:
10
+ response = requests.get(base_url)
11
+ response.raise_for_status()
12
+ content = response.text
13
+
14
+ # Save for download
15
+ with open("meta.yml", "w") as f:
16
+ f.write(content)
17
+
18
+ return content, "meta.yml"
19
+ except Exception as e:
20
+ return f"Error: Could not retrieve meta.yml for module '{module_name}'\n{e}", None
21
+
22
  def get_meta_yml_file(module_name: str) -> dict:
23
  """
24
  Access the nf-core/modules repository and return the meta.yml file of the given module.