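"""Streamlit chat demo for exploring VCell BioModels.

Pipeline: a user prompt is turned into VCell API query parameters, the VCell
BioModel database is queried, an LLM summarises the results, and any available
model diagrams plus SBML/VCML download links are rendered in the chat.
Built as a Google Summer of Code 2025 demo.
"""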
import streamlit as st
import json
from PIL import Image
from utils.params_extraction import get_path_params
from utils.llm_helper import get_llm_response
from vcelldb.vcell_api import query_vcell_models
from vcelldb.diagram import get_diagram_urls, get_bmkeys
favicon = Image.open("misc/favi.ico")
st.set_page_config(
    page_title="VCell Chatbot Demo", page_icon=favicon, layout="centered"
)
# Header
col1, col2, col3 = st.columns([1, 5, 1])
with col1:
    st.image("misc/gsoc.png", width=60)
with col2:
    st.markdown(
        """
<h1 style='text-align: center; margin-bottom: 0;'>🤖 VCell BioModel Chatbot Demo</h1>
<p style='text-align: center; font-size: 18px;'>Demo Built for Google Summer of Code 2025</p>
<p style='text-align: center; margin-top: 0.5em; font-size: 16px;'>
<a href='https://www.linkedin.com/in/kacem-mathlouthi/' target='_blank' style='text-decoration: none; margin-right: 20px;'>
<img src='https://cdn.jsdelivr.net/gh/devicons/devicon/icons/linkedin/linkedin-original.svg' alt='LinkedIn' width='22' style='vertical-align: middle; margin-right: 6px;'/>
LinkedIn
</a>
<a href='https://github.com/KacemMathlouthi/VCell-Demo' target='_blank' style='text-decoration: none; margin-right: 20px;'>
<img src='https://cdn.jsdelivr.net/gh/devicons/devicon/icons/github/github-original.svg' alt='GitHub' width='22' style='vertical-align: middle; margin-right: 6px;'/>
GitHub
</a>
<a href='mailto:kacem.mathlouthi@insat.ucar.tn' style='text-decoration: none;'>
<img src='https://upload.wikimedia.org/wikipedia/commons/7/7e/Gmail_icon_%282020%29.svg' alt='Gmail' width='22' style='vertical-align: middle; margin-right: 6px;'/>
Email
</a>
</p>
""",
        unsafe_allow_html=True,
    )
with col3:
    st.image("misc/NRNB.png", width=300)
st.markdown("""
<div style="text-align: center; margin-top: 30px; color: #444;">
<p style="font-size: 18px;">π‘ Try one of these example prompts:</p>
<div style="display: flex; justify-content: center; flex-wrap: wrap; gap: 10px; margin-top: 20px;">
<div style="border: 1px solid #ccc; border-radius: 8px; padding: 10px 16px; background-color: #f8f9fa; font-size: 15px;">
List VCell biomodels authored by the user ion
</div>
<div style="border: 1px solid #ccc; border-radius: 8px; padding: 10px 16px; background-color: #f8f9fa; font-size: 15px;">
Find a VCell biomodel with the id 201844485
</div>
</div>
</div>
""", unsafe_allow_html=True)
st.markdown("---")
# Session Initialization
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
# Chat History
for msg in st.session_state.chat_history:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# Chat input
if prompt := st.chat_input("Ask something about VCell biomodels..."):
    st.chat_message("user").markdown(prompt)
    st.session_state.chat_history.append({"role": "user", "content": prompt})

    # Step 1: Feature extraction
    with st.spinner("🔍 Extracting features..."):
        extracted_params = get_path_params(prompt)
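
    # Assumption, inferred from the error handling below: get_path_params returns
    # a dict of VCell API query parameters, or a dict carrying an "error" key when
    # extraction fails (see utils/params_extraction).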
if "error" in extracted_params:
error_msg = f"Error extracting features: {extracted_params['error']}"
st.chat_message("assistant").markdown(error_msg)
st.session_state.chat_history.append(
{"role": "assistant", "content": error_msg}
)
    else:
        # Show extracted params
        with st.expander("🧠 Extracted Query Parameters", expanded=False):
            st.json(extracted_params)

        # Step 2: Query VCell
        with st.spinner("📡 Querying VCell models..."):
            api_data = query_vcell_models(extracted_params)
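
        # Assumption based on the checks below: query_vcell_models returns the
        # VCell API response as a dict, or a dict with an "error" key on failure.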
if "error" in api_data:
error_msg = f"Error querying VCell API: {api_data['error']}"
st.chat_message("assistant").markdown(error_msg)
st.session_state.chat_history.append(
{"role": "assistant", "content": error_msg}
)
        else:
            # Show API data
            with st.expander("📦 VCell API Response", expanded=False):
                st.json(api_data)

            # Step 3: LLM Summary
            with st.spinner("🤖 Generating response..."):
                llm_summary_prompt = f"""
You are a VCell BioModel assistant helping users understand biological models.
User question:
\"{prompt}\"
VCell API response:
{json.dumps(api_data, indent=2)}
Generate a helpful, detailed, human-readable summary of the results. Explain the model, the simulations and the applications.
"""
                llm_response = get_llm_response(llm_summary_prompt)

            st.chat_message("assistant").markdown(llm_response)
            st.session_state.chat_history.append(
                {"role": "assistant", "content": llm_response}
            )
            # STEP 4: Show Diagrams
            diagram_urls = get_diagram_urls(api_data)
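            # diagram_urls is expected to hold image URLs for the model diagrams
            # found in the API response (it may be empty).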
            if diagram_urls:
                st.markdown("**Model Diagram(s):**")
                for url in diagram_urls:
                    try:
                        st.image(url, use_container_width=True)
                    except Exception:
                        st.markdown(f"[View Diagram]({url})")
            # STEP 5: Show SBML and VCML Download Links
            bmkeys = get_bmkeys(api_data)
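            # bmkeys is expected to be a list of BioModel keys, used below to build
            # the SBML and VCML download URLs on the public VCell API.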
            if bmkeys:
                st.markdown("### 📥 Download BioModel Files")
                for bmkey in bmkeys:
                    st.markdown(
                        f"""
**BioModel ID:** `{bmkey}`
🔹 [Download SBML](https://vcell.cam.uchc.edu/api/v0/biomodel/{bmkey}/biomodel.sbml)
🔹 [Download VCML](https://vcell.cam.uchc.edu/api/v0/biomodel/{bmkey}/biomodel.vcml)
""",
                        unsafe_allow_html=True
                    )
            else:
                st.warning("⚠️ No BioModel keys found in the API response.")