# app.py — Abyssinia V9 Streamlit application
import hashlib
import io
import time
from datetime import datetime

import numpy as np
import pandas as pd
import qrcode
import streamlit as st
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas

from omni_agent_v9 import OMNIOrchestratorV9
from omni_graph_v9 import OMNIGraphV9
# --- PDF GENERATION ENGINE (Text-Only / No Emojis) ---
def generate_signed_pdf(soap, genomics):
    """Render a "signed" clinical report as a PDF and return it in memory.

    Args:
        soap: SOAP-note text produced by the scribe step.
        genomics: Newline-separated pharmacogenomic risk summary.

    Returns:
        io.BytesIO positioned at byte 0, containing the finished A4 PDF.
    """
    buffer = io.BytesIO()
    c = canvas.Canvas(buffer, pagesize=A4)
    width, height = A4
    # Header section: title, generation date, rule line.
    c.setFont("Helvetica-Bold", 16)
    c.drawString(1*inch, height - 1*inch, "ABYSSINIA V9 | PRECISION CLINICAL REPORT")
    c.setFont("Helvetica", 10)
    # Use the actual generation date instead of a hard-coded literal.
    report_date = datetime.now().strftime("%Y-%m-%d")
    c.drawString(1*inch, height - 1.2*inch, f"DATE: {report_date} | STATUS: VERIFIED")
    c.line(1*inch, height - 1.3*inch, 7.2*inch, height - 1.3*inch)
    # Content body
    text = c.beginText(1*inch, height - 1.8*inch)
    text.setFont("Helvetica-Bold", 12)
    text.textLine("CLINICAL SUMMARY (SOAP):")
    text.setFont("Helvetica", 10)
    text.setLeading(14)
    # Hard-wrap the SOAP note at 85 chars and cap at 12 lines to fit the page.
    soap_lines = [soap[i:i+85] for i in range(0, len(soap), 85)]
    for line in soap_lines[:12]:
        text.textLine(line)
    text.textLine("")
    text.setFont("Helvetica-Bold", 12)
    text.textLine("PHARMACOGENOMIC ANALYSIS:")
    text.setFont("Helvetica", 10)
    # Sanitize genomic lines: markdown bullets become dashes and the warning
    # emoji (absent from the built-in PDF fonts) becomes "[!]".
    for line in genomics.split('\n'):
        clean_line = line.strip().replace('*', '-').replace('⚠️', '[!]')
        if clean_line:
            text.textLine(clean_line)
    c.drawText(text)
    # QR verification data. sha256 is stable across processes, unlike the
    # builtin hash(), which is randomized per interpreter run
    # (PYTHONHASHSEED) — a randomized code could never be re-verified later.
    soap_digest = hashlib.sha256(soap.encode("utf-8")).hexdigest()[:16]
    qr_data = f"ABY-V9-AUTH-{soap_digest}"
    qr = qrcode.make(qr_data)
    qr_img = io.BytesIO()
    qr.save(qr_img, format='PNG')
    qr_img.seek(0)
    c.drawImage(ImageReader(qr_img), 1*inch, 1*inch, width=1.1*inch, height=1.1*inch)
    # Signature block
    c.setFont("Helvetica-Bold", 11)
    c.drawString(2.5*inch, 1.6*inch, "DIGITAL SIGNATURE")
    c.setFont("Helvetica", 9)
    c.drawString(2.5*inch, 1.4*inch, "AUTHORIZED BY: OMNI V9 ORCHESTRATOR")
    # Deterministic digest of the QR payload, for the same reason as above.
    hash_code = hashlib.sha256(qr_data.encode("utf-8")).hexdigest()[:16]
    c.drawString(2.5*inch, 1.25*inch, "HASH: " + hash_code)
    c.save()
    buffer.seek(0)
    return buffer
# --- MAIN PAGE CONFIG ---
# Wide layout gives the two-column scribe view and 3D graph room to render.
st.set_page_config(page_title="Abyssinia V9 | Sovereign Web", layout="wide")
# --- CSS: V9 GLASSMORPHISM ---
# Injected as a raw <style> block; unsafe_allow_html=True is required so
# Streamlit renders the markup instead of escaping it.
st.markdown("""
<style>
/* Dark Background with Subtle Gradient */
.stApp {
background: linear-gradient(135deg, #0f0c29, #302b63, #24243e);
color: #e0e0e0;
}
/* Glassmorphic Cards */
.glass-card {
background: rgba(255, 255, 255, 0.05);
backdrop-filter: blur(10px);
-webkit-backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 20px;
margin-bottom: 20px;
box-shadow: 0 4px 30px rgba(0, 0, 0, 0.1);
}
/* Headers */
h1, h2, h3 { font-family: 'Helvetica Neue', sans-serif; font-weight: 300; }
h1 { background: -webkit-linear-gradient(#eee, #333); -webkit-background-clip: text; }
/* Buttons */
.stButton button {
background: rgba(66, 133, 244, 0.2);
border: 1px solid rgba(66, 133, 244, 0.5);
color: white;
border-radius: 20px;
transition: 0.3s;
}
.stButton button:hover {
background: rgba(66, 133, 244, 0.6);
box-shadow: 0 0 15px rgba(66, 133, 244, 0.5);
}
</style>
""", unsafe_allow_html=True)
# Initialize session state once per browser session.
# The orchestrator needs an API key from secrets; without it the app halts.
if "orchestrator" not in st.session_state:
    if "GEMINI_API_KEY" not in st.secrets:
        st.error("Missing GEMINI_API_KEY in Secrets.")
        st.stop()
    st.session_state.orchestrator = OMNIOrchestratorV9(st.secrets["GEMINI_API_KEY"])

# Remaining keys take simple defaults; each factory is called only when
# the key is absent, so reruns never clobber accumulated state.
for _key, _factory in (
    ("graph_engine", OMNIGraphV9),
    ("chat_history", list),
    ("soap_note", str),
    ("genomic_risks", str),
):
    if _key not in st.session_state:
        st.session_state[_key] = _factory()
# App masthead.
st.title("Abyssinia Intelligence V9")
st.markdown("**Sovereign Neuro-Symbolic Web**")
# --- TABS INTERFACE ---
# Three workflow stages: ingest (scribe), visualize (graph), converse (twin).
tab1, tab2, tab3 = st.tabs(["Ambient Scribe", "Neural Graph", "Twin Chat"])
# --- TAB 1: DATA INGESTION ---
with tab1:
    audio_col, vcf_col = st.columns(2)

    # Left column: record audio and transcribe it into a SOAP note.
    with audio_col:
        st.markdown('<div class="glass-card"><h3>Audio Stream</h3></div>', unsafe_allow_html=True)
        recording = st.audio_input("Record Clinical Encounter")
        if recording:
            with st.spinner("Processing Audio..."):
                st.session_state.soap_note = st.session_state.orchestrator.scribe_audio(recording.getvalue())
            st.success("Audio Ingested")

    # Right column: upload a VCF and map its variants to risks.
    with vcf_col:
        st.markdown('<div class="glass-card"><h3>Genomic VCF</h3></div>', unsafe_allow_html=True)
        vcf_file = st.file_uploader("Upload VCF", type=['vcf', 'txt'])
        if vcf_file:
            with st.spinner("Mapping Variants..."):
                st.session_state.genomic_risks = st.session_state.orchestrator.map_genomics(vcf_file.getvalue().decode())
            st.success("Genomics Mapped")

    # Show accumulated context as soon as a note exists.
    if st.session_state.soap_note:
        st.markdown(f'<div class="glass-card"><b>Current Context:</b><br>{st.session_state.soap_note}<br><hr>{st.session_state.genomic_risks}</div>', unsafe_allow_html=True)

    # Export is offered only once BOTH modalities have been ingested.
    if st.session_state.soap_note and st.session_state.genomic_risks:
        st.divider()
        st.markdown("### Export Clinical Record")
        if st.button("Generate Final Report"):
            with st.spinner("Building Document..."):
                report_pdf = generate_signed_pdf(
                    st.session_state.soap_note,
                    st.session_state.genomic_risks,
                )
            st.download_button(
                label="Download Signed PDF Report",
                data=report_pdf,
                file_name="Abyssinia_V9_Clinical_Report.pdf",
                mime="application/pdf",
            )
# --- TAB 2: 3D KNOWLEDGE GRAPH ---
with tab2:
    # Guard clause: the graph needs both data sources from Tab 1.
    if not (st.session_state.soap_note and st.session_state.genomic_risks):
        st.info("Please ingest Audio and Genomics data in Tab 1 first.")
    else:
        st.markdown('<div class="glass-card"><h3>Live Neuro-Symbolic Map</h3></div>', unsafe_allow_html=True)
        # Rebuild the patient graph from the current context, then render it.
        engine = st.session_state.graph_engine
        engine.build_patient_graph(
            "Patient-5501",
            st.session_state.soap_note,
            st.session_state.genomic_risks,
        )
        st.plotly_chart(engine.visualize_3d(), use_container_width=True)
# --- TAB 3: DIGITAL TWIN CHAT ---
with tab3:
    st.markdown('<div class="glass-card"><h3>Interact with Patient Digital Twin</h3></div>', unsafe_allow_html=True)

    # Replay the conversation so far.
    for entry in st.session_state.chat_history:
        with st.chat_message(entry["role"]):
            st.markdown(entry["content"])

    # Handle a new user turn.
    user_msg = st.chat_input("Ask the Digital Twin...")
    if user_msg:
        st.session_state.chat_history.append({"role": "user", "content": user_msg})
        with st.chat_message("user"):
            st.markdown(user_msg)
        with st.chat_message("assistant"):
            # Ground the twin in the current clinical + genomic context.
            twin_context = f"Clinical: {st.session_state.soap_note}\nGenomic: {st.session_state.genomic_risks}"
            reply = st.session_state.orchestrator.chat_with_twin(
                st.session_state.chat_history,
                user_msg,
                twin_context,
            )
            st.markdown(reply)
        st.session_state.chat_history.append({"role": "assistant", "content": reply})