mathysgrapotte commited on
Commit
d5fe8a1
Β·
1 Parent(s): 6cb2b11

add nice looking interface

Browse files
Files changed (1) hide show
  1. main.py +387 -21
main.py CHANGED
@@ -3,18 +3,27 @@ from tools.meta_yml_tools import get_meta_yml_file, extract_tools_from_meta_json
3
  from tools.bio_tools_tools import get_biotools_response, get_biotools_ontology
4
  from agents.query_ontology_db import agent
5
  import yaml
 
6
 
7
 
8
- def run_multi_agent(module_name):
9
-
 
 
 
 
10
  ### RETRIEVE INFORMATION FROM META.YML ###
11
-
12
  meta_yml = get_meta_yml_file(module_name=module_name)
 
 
 
13
  # module_info = extract_module_name_description(meta_file=meta_yml)
14
  # module_tools = extract_tools_from_meta_json(meta_file=meta_yml)
 
15
 
16
  # ### FIND THE MODULE TOOL ###
17
-
18
  # if len(module_info) == 1:
19
  # module_yaml_name = module_info[0]
20
  # module_description = module_info[1]
@@ -30,11 +39,12 @@ def run_multi_agent(module_name):
30
  # module_description = "my description" # TODO: this would be the answer of the first agent
31
 
32
  # ### EXTRACT INFO FROM META.YML ###
33
-
34
  # meta_info = extract_information_from_meta_json(meta_file=meta_yml, tool_name=module_yaml_name)
 
35
 
36
  # ### FETCH ONTOLOGIES FROM BIO.TOOLS ###
37
-
38
  # if meta_info["bio_tools_id"] == "":
39
  # bio_tools_list = get_biotools_response(module_yaml_name)
40
 
@@ -55,13 +65,22 @@ def run_multi_agent(module_name):
55
  # # The agent should receive the i/o from the module, the ontologies found in bio.tools, and assign the correct ones to each channel.
56
 
57
  # ### FETCH ONTOLOGY TERMS FROM EDAM DATABASE ###
58
-
59
  results = {"input": {}, "output": {}}
60
 
 
 
 
61
  for input_channel in meta_yml["input"]:
 
 
 
 
62
  for ch_element in input_channel:
63
  for key, value in ch_element.items():
64
  if value["type"] == "file":
 
 
65
  result = agent.run(f"You are presentend with a file format for the input {key}, which is a file and is described by the following description: '{value['description']}', search for the best matches out of possible matches in the edam ontology (formated as format_XXXX), and return the answer (a list of ontology classes) in a final_answer call such as final_answer([format_XXXX, format_XXXX, ...])")
66
  results["input"][key] = result
67
 
@@ -73,7 +92,7 @@ def run_multi_agent(module_name):
73
  # results["outputs"][key] = result
74
 
75
  ### FINAL AGENT TO BENCHMARK AND FIND THE COMMONALITIES BETWEEN BIO.TOOLS AND EDAM ###
76
-
77
  # TODO !!!
78
  # Get results from bio.tools and EDAM
79
  # The agent should double-check if results are correct (?)
@@ -81,39 +100,386 @@ def run_multi_agent(module_name):
81
  # and remove duplicates (this can be done through a python function?)
82
 
83
  ### UPDATE META.YML FILE ADDING ONTOLOGIES AND RETURN THE ANSWER ###
84
-
85
  # TODO: placeholder
86
  # This is returning the original meta.yml, but it should return the modified one with the ontologies added
87
  with open("tmp_meta.yml", "w") as fh:
88
  yaml.dump(meta_yml, fh)
 
 
 
 
89
  return meta_yml, "tmp_meta.yml" # TODO: placeholder
90
 
91
  def run_interface():
92
  """ Function to run the agent with a Gradio interface.
93
  This function sets up the Gradio interface and launches it.
94
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  # create the Gradio interface
96
- with gr.Blocks() as demo:
97
- gr.Markdown("### πŸ” Update an nf-core module `meta.yml` file by adding EDAM ontology terms.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
- # create the input textbox for the nf-core module name
100
- module_input = gr.Textbox(label="nf-core Module Name", placeholder="e.g. fastqc")
 
 
 
 
 
 
 
 
101
 
102
- # create the button to fetch the meta.yml file
103
- fetch_btn = gr.Button("Update meta.yml")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
- # create the output textbox for the meta.yml content and a download button
106
- meta_output = gr.Textbox(label="meta.yml content", lines=20)
107
- download_button = gr.File(label="Download meta.yml")
 
 
 
 
 
 
 
 
 
108
 
109
  # set the function to run when the button is clicked
110
  fetch_btn.click(
111
- fn=run_multi_agent, # TODO: change to final function
 
 
 
112
  inputs=module_input,
113
- outputs=[meta_output, download_button]
 
 
 
 
114
  )
 
 
 
 
 
 
 
 
 
 
 
115
 
116
  demo.launch()
117
 
118
  if __name__ == "__main__":
119
- run_multi_agent("fastqc")
 
3
  from tools.bio_tools_tools import get_biotools_response, get_biotools_ontology
4
  from agents.query_ontology_db import agent
5
  import yaml
6
+ import time
7
 
8
 
9
+ def run_multi_agent(module_name, progress=gr.Progress()):
10
+ """Enhanced function with progress tracking"""
11
+
12
+ progress(0, desc="πŸ¦™ Llama is waking up...")
13
+ time.sleep(0.5)
14
+
15
  ### RETRIEVE INFORMATION FROM META.YML ###
16
+ progress(0.1, desc="πŸ” Fetching meta.yml file...")
17
  meta_yml = get_meta_yml_file(module_name=module_name)
18
+ time.sleep(0.5)
19
+
20
+ progress(0.2, desc="πŸ¦™ Llama is analyzing the module structure...")
21
  # module_info = extract_module_name_description(meta_file=meta_yml)
22
  # module_tools = extract_tools_from_meta_json(meta_file=meta_yml)
23
+ time.sleep(0.5)
24
 
25
  # ### FIND THE MODULE TOOL ###
26
+ progress(0.3, desc="🧠 Llama is thinking about the best tool...")
27
  # if len(module_info) == 1:
28
  # module_yaml_name = module_info[0]
29
  # module_description = module_info[1]
 
39
  # module_description = "my description" # TODO: this would be the answer of the first agent
40
 
41
  # ### EXTRACT INFO FROM META.YML ###
42
+ progress(0.4, desc="πŸ“Š Extracting metadata information...")
43
  # meta_info = extract_information_from_meta_json(meta_file=meta_yml, tool_name=module_yaml_name)
44
+ time.sleep(0.5)
45
 
46
  # ### FETCH ONTOLOGIES FROM BIO.TOOLS ###
47
+ progress(0.5, desc="πŸ”¬ Searching bio.tools database...")
48
  # if meta_info["bio_tools_id"] == "":
49
  # bio_tools_list = get_biotools_response(module_yaml_name)
50
 
 
65
  # # The agent should receive the i/o from the module, the ontologies found in bio.tools, and assign the correct ones to each channel.
66
 
67
  # ### FETCH ONTOLOGY TERMS FROM EDAM DATABASE ###
68
+ progress(0.6, desc="πŸ¦™ Llama is consulting the EDAM database...")
69
  results = {"input": {}, "output": {}}
70
 
71
+ total_inputs = len(meta_yml.get("input", []))
72
+ current_input = 0
73
+
74
  for input_channel in meta_yml["input"]:
75
+ current_input += 1
76
+ progress(0.6 + (current_input / total_inputs) * 0.3,
77
+ desc=f"πŸ” Processing input channel {current_input}/{total_inputs}...")
78
+
79
  for ch_element in input_channel:
80
  for key, value in ch_element.items():
81
  if value["type"] == "file":
82
+ progress(0.6 + (current_input / total_inputs) * 0.3,
83
+ desc=f"πŸ¦™ Llama is analyzing {key}...")
84
  result = agent.run(f"You are presentend with a file format for the input {key}, which is a file and is described by the following description: '{value['description']}', search for the best matches out of possible matches in the edam ontology (formated as format_XXXX), and return the answer (a list of ontology classes) in a final_answer call such as final_answer([format_XXXX, format_XXXX, ...])")
85
  results["input"][key] = result
86
 
 
92
  # results["outputs"][key] = result
93
 
94
  ### FINAL AGENT TO BENCHMARK AND FIND THE COMMONALITIES BETWEEN BIO.TOOLS AND EDAM ###
95
+ progress(0.9, desc="πŸ”„ Finalizing ontology mappings...")
96
  # TODO !!!
97
  # Get results from bio.tools and EDAM
98
  # The agent should double-check if results are correct (?)
 
100
  # and remove duplicates (this can be done through a python function?)
101
 
102
  ### UPDATE META.YML FILE ADDING ONTOLOGIES AND RETURN THE ANSWER ###
103
+ progress(0.95, desc="πŸ’Ύ Generating updated meta.yml...")
104
  # TODO: placeholder
105
  # This is returning the original meta.yml, but it should return the modified one with the ontologies added
106
  with open("tmp_meta.yml", "w") as fh:
107
  yaml.dump(meta_yml, fh)
108
+
109
+ progress(1.0, desc="βœ… Llama has finished! Meta.yml updated successfully!")
110
+ time.sleep(0.5)
111
+
112
  return meta_yml, "tmp_meta.yml" # TODO: placeholder
113
 
114
  def run_interface():
115
  """ Function to run the agent with a Gradio interface.
116
  This function sets up the Gradio interface and launches it.
117
  """
118
+
119
+ # Custom theme with nf-core colors
120
+ custom_theme = gr.themes.Soft(
121
+ primary_hue=gr.themes.colors.Color(
122
+ c50="#f0fdf4", # Very light green
123
+ c100="#dcfce7", # Light green
124
+ c200="#bbf7d0", # Lighter green
125
+ c300="#86efac", # Light nf-core green
126
+ c400="#4ade80", # Medium green
127
+ c500="#24B064", # Official nf-core green
128
+ c600="#16a34a", # Darker green
129
+ c700="#396E35", # nf-core dark green
130
+ c800="#166534", # Very dark green
131
+ c900="#14532d", # Darkest green
132
+ c950="#0f2419", # Ultra dark green
133
+ ),
134
+ secondary_hue=gr.themes.colors.Color(
135
+ c50="#fefce8", # Very light yellow
136
+ c100="#fef3c7", # Light yellow
137
+ c200="#fde68a", # Lighter yellow
138
+ c300="#fcd34d", # Light yellow
139
+ c400="#f59e0b", # Medium yellow
140
+ c500="#ECDC86", # nf-core yellow
141
+ c600="#d97706", # Darker yellow
142
+ c700="#b45309", # Dark yellow
143
+ c800="#92400e", # Very dark yellow
144
+ c900="#78350f", # Darkest yellow
145
+ c950="#451a03", # Ultra dark yellow
146
+ ),
147
+ neutral_hue=gr.themes.colors.Color(
148
+ c50="#f8f9fa", # Bootstrap gray-100
149
+ c100="#e9ecef", # Bootstrap gray-200
150
+ c200="#dee2e6", # Bootstrap gray-300
151
+ c300="#ced4da", # Bootstrap gray-400
152
+ c400="#adb5bd", # Bootstrap gray-500
153
+ c500="#6c757d", # Bootstrap gray-600
154
+ c600="#495057", # Bootstrap gray-700
155
+ c700="#343a40", # Bootstrap gray-800
156
+ c800="#212529", # Bootstrap gray-900 (main nf-core background)
157
+ c900="#3F2B29", # nf-core brown
158
+ c950="#1a1411", # Ultra dark brown
159
+ ),
160
+ font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
161
+ radius_size="lg",
162
+ spacing_size="md"
163
+ )
164
+
165
+ # Custom CSS with nf-core branding
166
+ custom_css = """
167
+ /* Main container styling with nf-core colors */
168
+ .gradio-container {
169
+ background: linear-gradient(135deg, #24B064 0%, #396E35 50%, #3F2B29 100%) !important;
170
+ min-height: 100vh;
171
+ }
172
+
173
+ .main-header {
174
+ text-align: center;
175
+ padding: 2rem 0;
176
+ background: rgba(33, 37, 41, 0.95);
177
+ border-radius: 20px;
178
+ margin: 1rem 0 2rem 0;
179
+ backdrop-filter: blur(10px);
180
+ border: 2px solid rgba(36, 176, 100, 0.5);
181
+ box-shadow: 0 8px 32px rgba(36, 176, 100, 0.3);
182
+ }
183
+
184
+ .main-header h1 {
185
+ color: #24B064 !important;
186
+ font-size: 2.5rem !important;
187
+ font-weight: 700 !important;
188
+ margin: 0 !important;
189
+ text-shadow: 0 2px 4px rgba(36, 176, 100, 0.3);
190
+ }
191
+
192
+ .main-header p {
193
+ color: #e9ecef !important;
194
+ font-size: 1.1rem !important;
195
+ margin: 0.5rem 0 0 0 !important;
196
+ }
197
+
198
+ .nf-core-logo {
199
+ width: 60px;
200
+ height: 60px;
201
+ margin: 0 auto 1rem auto;
202
+ display: block;
203
+ filter: drop-shadow(0 4px 8px rgba(0,0,0,0.3));
204
+ }
205
+
206
+ /* Custom Llama Spinner with nf-core styling */
207
+ @keyframes llamaRun {
208
+ 0% { transform: translateX(-20px) rotate(-5deg); }
209
+ 50% { transform: translateX(20px) rotate(5deg); }
210
+ 100% { transform: translateX(-20px) rotate(-5deg); }
211
+ }
212
+
213
+ @keyframes llamaBounce {
214
+ 0%, 100% { transform: translateY(0px); }
215
+ 50% { transform: translateY(-10px); }
216
+ }
217
+
218
+ @keyframes nfCoreGlow {
219
+ 0%, 100% { box-shadow: 0 8px 32px rgba(36, 176, 100, 0.3); }
220
+ 50% { box-shadow: 0 8px 32px rgba(36, 176, 100, 0.6); }
221
+ }
222
+
223
+ .llama-loader {
224
+ display: flex;
225
+ flex-direction: column;
226
+ align-items: center;
227
+ justify-content: center;
228
+ padding: 2rem;
229
+ background: rgba(52, 58, 64, 0.95);
230
+ border-radius: 20px;
231
+ margin: 1rem;
232
+ border: 2px solid #24B064;
233
+ animation: nfCoreGlow 2s ease-in-out infinite;
234
+ backdrop-filter: blur(10px);
235
+ }
236
+
237
+ .llama-emoji {
238
+ font-size: 4rem;
239
+ animation: llamaRun 2s ease-in-out infinite, llamaBounce 1s ease-in-out infinite;
240
+ margin-bottom: 1rem;
241
+ filter: drop-shadow(0 4px 8px rgba(36, 176, 100, 0.3));
242
+ }
243
+
244
+ .llama-text {
245
+ font-size: 1.2rem;
246
+ color: #24B064;
247
+ font-weight: 600;
248
+ text-align: center;
249
+ margin: 0.5rem 0;
250
+ }
251
+
252
+ .llama-subtext {
253
+ font-size: 0.9rem;
254
+ color: #ECDC86;
255
+ text-align: center;
256
+ font-style: italic;
257
+ }
258
+
259
+ /* Input/Output styling with dark nf-core theme */
260
+ .input-container, .output-container {
261
+ background: rgba(52, 58, 64, 0.95) !important;
262
+ border-radius: 15px !important;
263
+ padding: 1.5rem !important;
264
+ margin: 1rem 0 !important;
265
+ box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3) !important;
266
+ border: 2px solid rgba(36, 176, 100, 0.4) !important;
267
+ backdrop-filter: blur(10px) !important;
268
+ }
269
+
270
+ .input-container:hover, .output-container:hover {
271
+ border-color: #24B064 !important;
272
+ box-shadow: 0 6px 25px rgba(36, 176, 100, 0.4) !important;
273
+ transition: all 0.3s ease !important;
274
+ }
275
+
276
+ /* Button styling with nf-core green */
277
+ .btn-primary {
278
+ background: linear-gradient(45deg, #24B064, #396E35) !important;
279
+ border: none !important;
280
+ border-radius: 12px !important;
281
+ padding: 1rem 2rem !important;
282
+ font-weight: 600 !important;
283
+ font-size: 1.1rem !important;
284
+ color: white !important;
285
+ box-shadow: 0 4px 15px rgba(36, 176, 100, 0.4) !important;
286
+ transition: all 0.3s ease !important;
287
+ text-transform: none !important;
288
+ }
289
+
290
+ .btn-primary:hover {
291
+ transform: translateY(-2px) !important;
292
+ box-shadow: 0 6px 20px rgba(36, 176, 100, 0.6) !important;
293
+ background: linear-gradient(45deg, #396E35, #24B064) !important;
294
+ }
295
+
296
+ /* Progress bar with nf-core colors */
297
+ .progress-bar {
298
+ background: linear-gradient(90deg, #24B064, #ECDC86) !important;
299
+ }
300
+
301
+ /* Textbox styling with dark theme */
302
+ .gr-textbox {
303
+ border-radius: 10px !important;
304
+ border: 2px solid rgba(36, 176, 100, 0.3) !important;
305
+ transition: all 0.3s ease !important;
306
+ background: rgba(33, 37, 41, 0.9) !important;
307
+ color: #e9ecef !important;
308
+ }
309
+
310
+ .gr-textbox:focus {
311
+ border-color: #24B064 !important;
312
+ box-shadow: 0 0 0 3px rgba(36, 176, 100, 0.2) !important;
313
+ }
314
+
315
+ /* Section headers with dark theme */
316
+ .section-header {
317
+ color: #24B064 !important;
318
+ font-weight: 700 !important;
319
+ border-bottom: 2px solid #24B064 !important;
320
+ padding-bottom: 0.5rem !important;
321
+ margin-bottom: 1rem !important;
322
+ font-size: 1.1rem !important;
323
+ }
324
+
325
+ /* Labels and text in dark theme */
326
+ .gr-box label {
327
+ color: #e9ecef !important;
328
+ }
329
+
330
+ .gr-box .gr-text-sm {
331
+ color: #adb5bd !important;
332
+ }
333
+
334
+ /* Animation for results */
335
+ @keyframes slideInUp {
336
+ from {
337
+ transform: translateY(30px);
338
+ opacity: 0;
339
+ }
340
+ to {
341
+ transform: translateY(0);
342
+ opacity: 1;
343
+ }
344
+ }
345
+
346
+ .result-container {
347
+ animation: slideInUp 0.5s ease-out;
348
+ border: 1px solid #24B064 !important;
349
+ background: rgba(33, 37, 41, 0.9) !important;
350
+ }
351
+
352
+ .result-container textarea {
353
+ background: rgba(33, 37, 41, 0.9) !important;
354
+ color: #e9ecef !important;
355
+ border: 1px solid rgba(36, 176, 100, 0.3) !important;
356
+ }
357
+
358
+ /* Footer with nf-core styling */
359
+ .nf-core-footer {
360
+ background: rgba(63, 43, 41, 0.95) !important;
361
+ border-radius: 15px !important;
362
+ padding: 1.5rem !important;
363
+ margin: 2rem 0 1rem 0 !important;
364
+ backdrop-filter: blur(10px) !important;
365
+ border: 1px solid rgba(36, 176, 100, 0.4) !important;
366
+ }
367
+
368
+ .nf-core-footer p {
369
+ color: rgba(255, 255, 255, 0.9) !important;
370
+ margin: 0 !important;
371
+ }
372
+
373
+ .nf-core-footer strong {
374
+ color: #ECDC86 !important;
375
+ }
376
+
377
+ /* File component dark theme */
378
+ .gr-file {
379
+ background: rgba(33, 37, 41, 0.9) !important;
380
+ border: 1px solid rgba(36, 176, 100, 0.3) !important;
381
+ color: #e9ecef !important;
382
+ }
383
+ """
384
+
385
  # create the Gradio interface
386
+ with gr.Blocks(theme=custom_theme, css=custom_css, title="πŸ¦™ nf-core Ontology Assistant") as demo:
387
+
388
+ # Header with nf-core logo
389
+ gr.HTML("""
390
+ <div class="main-header">
391
+ <img src="https://raw.githubusercontent.com/nf-core/logos/master/nf-core-logos/nf-core-logo-square.png" class="nf-core-logo" alt="nf-core logo">
392
+ <h1>πŸ¦™ nf-core Ontology Assistant</h1>
393
+ <p>Intelligent nf-core meta.yml enhancement with EDAM ontology terms</p>
394
+ </div>
395
+ """)
396
+
397
+ with gr.Row():
398
+ with gr.Column(scale=1, elem_classes="input-container"):
399
+ gr.HTML("""
400
+ <div class="section-header">
401
+ πŸ“ Module Configuration
402
+ </div>
403
+ """)
404
+
405
+ # create the input textbox for the nf-core module name
406
+ module_input = gr.Textbox(
407
+ label="πŸ”§ nf-core Module Name",
408
+ placeholder="e.g. fastqc, samtools, bwa...",
409
+ info="Enter the name of the nf-core module you want to enhance",
410
+ elem_classes="gr-textbox"
411
+ )
412
 
413
+ # create the button to fetch the meta.yml file
414
+ fetch_btn = gr.Button(
415
+ "πŸš€ Start nf-core Analysis",
416
+ variant="primary",
417
+ elem_classes="btn-primary",
418
+ size="lg"
419
+ )
420
+
421
+ # Llama status indicator
422
+ status_display = gr.HTML(visible=False)
423
 
424
+ with gr.Column(scale=1, elem_classes="output-container"):
425
+ gr.HTML("""
426
+ <div class="section-header">
427
+ πŸ“Š Enhanced Results
428
+ </div>
429
+ """)
430
+
431
+ # create the output textbox for the meta.yml content and a download button
432
+ meta_output = gr.Textbox(
433
+ label="πŸ“„ Updated meta.yml content",
434
+ lines=15,
435
+ interactive=False,
436
+ elem_classes="result-container"
437
+ )
438
+
439
+ download_button = gr.File(
440
+ label="πŸ’Ύ Download Enhanced meta.yml",
441
+ elem_classes="result-container"
442
+ )
443
 
444
+ # Progress indicator function
445
+ def show_llama_status():
446
+ return gr.HTML("""
447
+ <div class="llama-loader">
448
+ <div class="llama-emoji">πŸ¦™</div>
449
+ <div class="llama-text">nf-core Llama is working hard!</div>
450
+ <div class="llama-subtext">Analyzing ontologies and enhancing your meta.yml...</div>
451
+ </div>
452
+ """, visible=True)
453
+
454
+ def hide_llama_status():
455
+ return gr.HTML("", visible=False)
456
 
457
  # set the function to run when the button is clicked
458
  fetch_btn.click(
459
+ fn=show_llama_status,
460
+ outputs=status_display
461
+ ).then(
462
+ fn=run_multi_agent,
463
  inputs=module_input,
464
+ outputs=[meta_output, download_button],
465
+ show_progress="full"
466
+ ).then(
467
+ fn=hide_llama_status,
468
+ outputs=status_display
469
  )
470
+
471
+ # Footer with nf-core branding
472
+ gr.HTML("""
473
+ <div class="nf-core-footer">
474
+ <p style="text-align: center;">
475
+ πŸ”¬ <strong>Powered by nf-core, EDAM Ontology & bio.tools</strong>
476
+ <br>
477
+ Built with ❀️ and πŸ¦™ for the nf-core community
478
+ </p>
479
+ </div>
480
+ """)
481
 
482
  demo.launch()
483
 
484
  if __name__ == "__main__":
485
+ run_interface()