Pontonkid committed on
Commit
9f8ae5f
·
verified ·
1 Parent(s): 6c4a649

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +191 -133
src/streamlit_app.py CHANGED
@@ -6,19 +6,23 @@ from PIL import Image
6
  from huggingface_hub import InferenceClient
7
 
8
  # -----------------------------------------------------------------------------
9
- # 1. SETUP & SECRETS
10
  # -----------------------------------------------------------------------------
11
- st.set_page_config(page_title="SHINUI | Intelligent Care", page_icon="✨", layout="wide")
12
-
13
- # AUTO-FIX FOR 403 ERROR
14
  config_dir = ".streamlit"
15
  if not os.path.exists(config_dir):
16
  os.makedirs(config_dir)
17
  with open(os.path.join(config_dir, "config.toml"), "w") as f:
18
  f.write("[server]\nenableXsrfProtection=false\nenableCORS=false\nmaxUploadSize=200\n")
19
 
 
 
 
 
 
20
  # RETRIEVE API KEY
 
21
  HF_TOKEN = os.environ.get("HF_TOKEN")
 
22
  if not HF_TOKEN:
23
  st.error("⚠️ API Key missing! Please add 'HF_TOKEN' in Space Settings > Secrets.")
24
  st.stop()
@@ -27,174 +31,228 @@ if not HF_TOKEN:
27
  client = InferenceClient(token=HF_TOKEN)
28
 
29
  # -----------------------------------------------------------------------------
30
- # 2. THE "UNGATED" BRAIN LOGIC
 
 
 
 
 
 
 
 
 
31
  # -----------------------------------------------------------------------------
32
  def get_ai_insight(input_type, content):
33
  """
34
- Uses Open-Source, Non-Gated models to avoid permission errors.
35
  """
36
- # The "Brain" prompt that interprets the data
37
- system_prompt = (
38
- "You are SHINUI, an advanced medical AI. "
39
- "Your goal is to analyze the user's input and provide clear, safe, and professional medical insights. "
40
- "Structure your answer with: 'Analysis', 'Risk Level', and 'Recommended Action'. "
41
- "Keep it concise and helpful."
42
- )
43
 
44
  try:
45
- final_text_input = ""
46
-
47
- # A. VISION (Uses BLIP - Ungated)
48
  if input_type == "Image":
49
- # 1. We use BLIP to "see" the image and describe it to text
50
- # This model is open and requires no permissions.
51
- image_description = client.image_to_text(
52
- model="Salesforce/blip-image-captioning-large",
53
- image=content
 
 
 
 
 
 
 
 
 
 
54
  )
55
- # 2. We send that description to the LLM to "medicalize" it
56
- final_text_input = f"I have an image that shows: '{image_description}'. Please analyze this medically."
57
 
58
- # B. AUDIO (Uses Whisper - Ungated)
59
  elif input_type == "Audio":
60
- # 1. Transcribe audio to text
61
  transcription = client.automatic_speech_recognition(
62
  model="openai/whisper-large-v3-turbo",
63
  audio=content
64
  ).text
65
- final_text_input = f"The patient described their symptoms verbally: '{transcription}'. Analyze this."
 
 
 
 
 
 
 
 
66
 
67
- # C. TEXT (Direct)
68
  elif input_type == "Text":
69
- final_text_input = f"Patient notes: '{content}'. Analyze this."
70
-
71
- # D. FINAL REASONING (Uses Zephyr - Ungated & Smart)
72
- # Zephyr-7b-beta is free, open, and very good at following instructions.
73
- messages = [
74
- {"role": "system", "content": system_prompt},
75
- {"role": "user", "content": final_text_input}
76
- ]
77
-
78
- response = client.chat_completion(
79
- model="HuggingFaceH4/zephyr-7b-beta",
80
- messages=messages,
81
- max_tokens=400
82
- )
83
-
84
- return response.choices[0].message.content
85
 
86
  except Exception as e:
87
  return f"⚠️ Analysis Error: {str(e)}"
88
 
89
  # -----------------------------------------------------------------------------
90
- # 3. VISUAL STYLING (TIVE.COM STYLE)
91
  # -----------------------------------------------------------------------------
92
- if 'history' not in st.session_state: st.session_state.history = []
93
- if 'result' not in st.session_state: st.session_state.result = None
94
-
95
  st.markdown("""
96
  <style>
97
  @import url('https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@300;400;600;800&display=swap');
98
-
99
  .stApp {
100
  background-color: #020617;
101
  background-image: radial-gradient(circle at 50% 0%, #1e293b 0%, #020617 70%);
102
  font-family: 'Plus Jakarta Sans', sans-serif;
103
  color: #f8fafc;
104
  }
105
-
106
- /* Card Styling */
107
  .shinui-card {
108
- background: rgba(30, 41, 59, 0.4);
109
- border: 1px solid rgba(148, 163, 184, 0.1);
110
- border-radius: 16px;
111
- padding: 25px;
112
- backdrop-filter: blur(12px);
113
- margin-bottom: 20px;
114
  }
115
-
116
- /* Buttons */
117
  div.stButton > button {
118
- background: #38bdf8;
119
- color: #0f172a;
120
- border: none;
121
- font-weight: 700;
122
- padding: 12px 20px;
123
- border-radius: 8px;
124
- width: 100%;
125
- transition: all 0.3s;
126
- }
127
- div.stButton > button:hover {
128
- background: #ffffff;
129
- box-shadow: 0 0 20px rgba(56, 189, 248, 0.5);
130
  }
131
-
132
- /* Headers */
133
- h1, h2, h3 { color: white; font-weight: 700; }
134
-
135
  #MainMenu, footer, header {visibility: hidden;}
136
  </style>
137
  """, unsafe_allow_html=True)
138
 
139
  # -----------------------------------------------------------------------------
140
- # 4. APP UI
141
- # -----------------------------------------------------------------------------
142
- c1, c2 = st.columns([1, 8])
143
- with c1: st.markdown("### SHINUI")
144
- st.markdown("<hr style='border-color:rgba(255,255,255,0.1)'>", unsafe_allow_html=True)
145
-
146
- # Tabs
147
- t1, t2, t3 = st.tabs(["📷 Vision", "🎙️ Voice", "📝 Text"])
148
-
149
- # Tab 1: Vision
150
- with t1:
151
- st.markdown("<div class='shinui-card'>", unsafe_allow_html=True)
152
- img_file = st.file_uploader("Upload Scan / Image", type=['png','jpg','jpeg'])
153
- if img_file and st.button("Analyze Visual", key="btn_img"):
154
- image = Image.open(img_file)
155
- with st.spinner("Processing Visual Data..."):
156
- res = get_ai_insight("Image", image)
157
- st.session_state.result = res
158
- st.session_state.history.append(res)
159
- st.markdown("</div>", unsafe_allow_html=True)
160
-
161
- # Tab 2: Voice
162
- with t2:
163
- st.markdown("<div class='shinui-card'>", unsafe_allow_html=True)
164
- # Uses new Streamlit Audio Input (Record directly in browser)
165
- audio_input = st.audio_input("Record Symptoms")
166
- if audio_input:
167
- if st.button("Analyze Recording", key="btn_audio"):
168
- with st.spinner("Transcribing & Analyzing..."):
169
- # Read audio bytes
170
- res = get_ai_insight("Audio", audio_input.read())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
  st.session_state.result = res
172
  st.session_state.history.append(res)
173
- st.markdown("</div>", unsafe_allow_html=True)
174
-
175
- # Tab 3: Text
176
- with t3:
177
- st.markdown("<div class='shinui-card'>", unsafe_allow_html=True)
178
- txt = st.text_area("Clinical Notes / Symptoms")
179
- if txt and st.button("Analyze Notes", key="btn_txt"):
180
- with st.spinner("Analyzing..."):
181
- res = get_ai_insight("Text", txt)
182
- st.session_state.result = res
183
- st.session_state.history.append(res)
184
- st.markdown("</div>", unsafe_allow_html=True)
185
-
186
- # -----------------------------------------------------------------------------
187
- # 5. RESULTS & HISTORY
188
- # -----------------------------------------------------------------------------
189
- if st.session_state.result:
190
- st.markdown(f"""
191
- <div class='shinui-card' style='border-left: 5px solid #38bdf8;'>
192
- <h3 style='margin-top:0; color:#38bdf8;'>Analysis Result</h3>
193
- <div style='white-space: pre-wrap; color: #e2e8f0; line-height: 1.6;'>{st.session_state.result}</div>
194
- </div>
195
- """, unsafe_allow_html=True)
196
-
197
- if st.session_state.history:
198
- with st.expander("View Session History"):
199
- for i, h in enumerate(reversed(st.session_state.history)):
200
- st.markdown(f"**Scan {len(st.session_state.history)-i}:** {h[:100]}...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  from huggingface_hub import InferenceClient
7
 
8
# -----------------------------------------------------------------------------
# 0. AUTO-FIX FOR UPLOAD ERROR (RUNS FIRST)
# -----------------------------------------------------------------------------
# Hugging Face Spaces reject Streamlit uploads when XSRF/CORS protection is
# enabled; write a server config before anything else so it takes effect.
config_dir = ".streamlit"
# exist_ok=True avoids the check-then-create race of exists() + makedirs().
os.makedirs(config_dir, exist_ok=True)
with open(os.path.join(config_dir, "config.toml"), "w") as f:
    f.write("[server]\nenableXsrfProtection=false\nenableCORS=false\nmaxUploadSize=200\n")

# -----------------------------------------------------------------------------
# 1. SETUP & CONFIGURATION
# -----------------------------------------------------------------------------
st.set_page_config(page_title="SHINUI | Intelligent Care", page_icon="✨", layout="wide")

# RETRIEVE API KEY
# Make sure "HF_TOKEN" is in your Space Settings -> Secrets
HF_TOKEN = os.environ.get("HF_TOKEN")

if not HF_TOKEN:
    st.error("⚠️ API Key missing! Please add 'HF_TOKEN' in Space Settings > Secrets.")
    st.stop()

# Single shared client for all inference calls below.
client = InferenceClient(token=HF_TOKEN)
32
 
33
# -----------------------------------------------------------------------------
# 2. STATE MANAGEMENT (LOGIN & HISTORY)
# -----------------------------------------------------------------------------
# Seed every session-state key exactly once per browser session.
for _key, _default in (
    ('page', 'landing'),      # router target
    ('logged_in', False),     # demo auth flag
    ('user_email', ""),
    ('history', []),          # past analysis strings
    ('result', None),         # most recent analysis
):
    if _key not in st.session_state:
        st.session_state[_key] = _default
41
+
42
+ # -----------------------------------------------------------------------------
43
+ # 3. THE BRAIN (Llama 3.2 Logic)
44
  # -----------------------------------------------------------------------------
45
  def get_ai_insight(input_type, content):
46
  """
47
+ Handles AI processing using Llama 3.2 Vision Instruct.
48
  """
49
+ prompt_base = "You are SHINUI, a professional medical AI assistant. Analyze the following input and provide a concise risk assessment and next steps."
 
 
 
 
 
 
50
 
51
  try:
52
+ # A. VISION (Llama 3.2 Vision)
 
 
53
  if input_type == "Image":
54
+ messages = [
55
+ {
56
+ "role": "user",
57
+ "content": [
58
+ {"type": "image"},
59
+ {"type": "text", "text": prompt_base}
60
+ ]
61
+ }
62
+ ]
63
+ # Pass image directly to the client
64
+ response = client.chat_completion(
65
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
66
+ messages=messages,
67
+ max_tokens=500,
68
+ image=content # The PIL Image object
69
  )
70
+ return response.choices[0].message.content
 
71
 
72
+ # B. AUDIO (Whisper -> Llama)
73
  elif input_type == "Audio":
74
+ # 1. Transcribe
75
  transcription = client.automatic_speech_recognition(
76
  model="openai/whisper-large-v3-turbo",
77
  audio=content
78
  ).text
79
+
80
+ # 2. Analyze Text
81
+ messages = [{"role": "user", "content": f"{prompt_base} Patient speech: '{transcription}'"}]
82
+ analysis = client.chat_completion(
83
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
84
+ messages=messages,
85
+ max_tokens=500
86
+ )
87
+ return f"**Transcript:** '{transcription}'\n\n**Analysis:** {analysis.choices[0].message.content}"
88
 
89
+ # C. TEXT (Llama)
90
  elif input_type == "Text":
91
+ messages = [{"role": "user", "content": f"{prompt_base} Patient notes: '{content}'"}]
92
+ response = client.chat_completion(
93
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
94
+ messages=messages,
95
+ max_tokens=500
96
+ )
97
+ return response.choices[0].message.content
 
 
 
 
 
 
 
 
 
98
 
99
  except Exception as e:
100
  return f"⚠️ Analysis Error: {str(e)}"
101
 
102
# -----------------------------------------------------------------------------
# 4. UI STYLING (DARK THEME)
# -----------------------------------------------------------------------------
# Injects global CSS: dark radial-gradient background, glassy ".shinui-card"
# panels, cyan full-width buttons, and hides Streamlit's default chrome
# (main menu, footer, header).
st.markdown("""
<style>
@import url('https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@300;400;600;800&display=swap');
.stApp {
    background-color: #020617;
    background-image: radial-gradient(circle at 50% 0%, #1e293b 0%, #020617 70%);
    font-family: 'Plus Jakarta Sans', sans-serif;
    color: #f8fafc;
}
.shinui-card {
    background: rgba(30, 41, 59, 0.4); border: 1px solid rgba(148, 163, 184, 0.1);
    border-radius: 16px; padding: 25px; backdrop-filter: blur(12px); margin-bottom: 20px;
}
div.stButton > button {
    background: #38bdf8; color: #0f172a; border: none; font-weight: 700;
    padding: 12px 20px; border-radius: 8px; width: 100%; transition: all 0.3s;
}
div.stButton > button:hover { background: #ffffff; box-shadow: 0 0 20px rgba(56, 189, 248, 0.5); }
#MainMenu, footer, header {visibility: hidden;}
</style>
""", unsafe_allow_html=True)
126
 
127
# -----------------------------------------------------------------------------
# 5. HELPER FUNCTIONS
# -----------------------------------------------------------------------------
def nav_to(page):
    """Record *page* as the router target and rerun the script immediately."""
    st.session_state.page = page
    st.rerun()

def sign_out():
    """Wipe all user-scoped session state and return to the landing page."""
    for key, cleared in (
        ('logged_in', False),
        ('history', []),
        ('result', None),
        ('user_email', ""),
    ):
        st.session_state[key] = cleared
    nav_to('landing')
140
+
141
+ # -----------------------------------------------------------------------------
142
+ # 6. PAGES
143
+ # -----------------------------------------------------------------------------
144
+
145
# --- LANDING ---
def show_landing():
    """Render the public landing page: brand bar, hero copy, sign-in CTA."""
    brand_col, _ = st.columns([1, 8])
    with brand_col:
        st.markdown("### ✨ SHINUI")
    st.markdown("<br><br>", unsafe_allow_html=True)

    hero_col, card_col = st.columns([1.5, 1])
    with hero_col:
        st.markdown("""
        <h1 style='font-size: 4rem; line-height: 1.1; margin-bottom: 20px;'>
        Medical Intelligence.<br><span style='color:#38bdf8;'>Simplified.</span>
        </h1>
        <p style='font-size: 1.2rem; color: #94a3b8; margin-bottom: 40px;'>
        SHINUI uses Llama 3.2 Vision AI to analyze medical data instantly.
        </p>
        """, unsafe_allow_html=True)
        cta_col, _ = st.columns([1, 2])
        with cta_col:
            if st.button("Sign In"):
                nav_to('login')

    with card_col:
        st.markdown("""
        <div class='shinui-card'>
        <h3>🧬 Multimodal Brain</h3>
        <p style='color:#94a3b8;'>Powered by Meta Llama 3.2 Vision.</p>
        </div>
        """, unsafe_allow_html=True)
172
+
173
# --- LOGIN ---
def show_login():
    """Render the demo login form; any non-empty email is accepted."""
    c1, c2, c3 = st.columns([1, 1, 1])
    with c2:
        st.markdown("<br><br>", unsafe_allow_html=True)
        st.markdown("<div class='shinui-card' style='text-align:center;'><h2>Member Access</h2></div>", unsafe_allow_html=True)
        email = st.text_input("Email")
        # NOTE(review): password is collected but never checked — demo auth only.
        password = st.text_input("Password", type="password")
        if st.button("Authenticate"):
            if email:
                st.session_state.logged_in = True
                st.session_state.user_email = email
                nav_to('dashboard')
            else:
                # FIX: previously an empty email failed silently with no feedback.
                st.error("Please enter an email address.")
        if st.button("Back"):
            nav_to('landing')
187
+
188
# --- DASHBOARD ---
def show_dashboard():
    """Render the authenticated workspace: history sidebar plus three input tabs
    (image, audio, text), each feeding get_ai_insight and the shared result area."""
    # SIDEBAR
    with st.sidebar:
        st.markdown(f"### 👤 {st.session_state.user_email}")
        st.markdown("---")
        st.write("HISTORY LOG")
        if st.session_state.history:
            # Newest first; only the first 50 chars of each result as a teaser.
            for h in reversed(st.session_state.history):
                st.markdown(f"<div style='font-size:0.8rem; padding:5px; border-left:2px solid #38bdf8; margin-bottom:5px;'>{h[:50]}...</div>", unsafe_allow_html=True)
        else:
            st.caption("No scans yet.")
        st.markdown("---")
        if st.button("Sign Out"): sign_out()

    # MAIN UI
    st.title("Diagnostic Interface")
    t1, t2, t3 = st.tabs(["📷 Vision", "🎙️ Voice", "📝 Text"])

    # TAB 1: VISION — upload an image, analyze on demand.
    with t1:
        st.markdown("<div class='shinui-card'>", unsafe_allow_html=True)
        img_file = st.file_uploader("Upload Scan", type=['png','jpg','jpeg'])
        if img_file and st.button("Analyze Visual"):
            image = Image.open(img_file)
            with st.spinner("Processing Visual Data..."):
                res = get_ai_insight("Image", image)
                st.session_state.result = res
                st.session_state.history.append(res)
        st.markdown("</div>", unsafe_allow_html=True)

    # TAB 2: VOICE — record in-browser, then transcribe + analyze.
    with t2:
        st.markdown("<div class='shinui-card'>", unsafe_allow_html=True)
        audio_input = st.audio_input("Record Symptoms")
        if audio_input:
            if st.button("Analyze Recording"):
                with st.spinner("Transcribing & Analyzing..."):
                    # Raw bytes of the recording are passed to the pipeline.
                    res = get_ai_insight("Audio", audio_input.read())
                    st.session_state.result = res
                    st.session_state.history.append(res)
        st.markdown("</div>", unsafe_allow_html=True)

    # TAB 3: TEXT — free-form clinical notes.
    with t3:
        st.markdown("<div class='shinui-card'>", unsafe_allow_html=True)
        txt = st.text_area("Clinical Notes")
        if txt and st.button("Analyze Notes"):
            with st.spinner("Thinking..."):
                res = get_ai_insight("Text", txt)
                st.session_state.result = res
                st.session_state.history.append(res)
        st.markdown("</div>", unsafe_allow_html=True)

    # RESULTS AREA — shows the most recent analysis from any tab.
    if st.session_state.result:
        st.markdown(f"""
        <div class='shinui-card' style='border-left: 5px solid #38bdf8;'>
        <h3 style='margin-top:0; color:#38bdf8;'>Analysis Complete</h3>
        <div style='white-space: pre-wrap; color: #e2e8f0; line-height: 1.6;'>{st.session_state.result}</div>
        </div>
        """, unsafe_allow_html=True)
250
+
251
+ # -----------------------------------------------------------------------------
252
+ # 7. ROUTER
253
+ # -----------------------------------------------------------------------------
254
+ if st.session_state.page == 'landing': show_landing()
255
+ elif st.session_state.page == 'login': show_login()
256
+ elif st.session_state.page == 'dashboard':
257
+ if st.session_state.logged_in: show_dashboard()
258
+ else: nav_to('login')