RakeshNJ12345 committed on
Commit
f715625
·
verified ·
1 Parent(s): e7abd9d

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +57 -30
src/streamlit_app.py CHANGED
@@ -3,7 +3,8 @@
3
  # ──── SET ENVIRONMENT VARIABLES BEFORE ANY IMPORTS ──────────────────────────────
4
  import os
5
  import tempfile
6
- import requests
 
7
 
8
  # Create dedicated cache directories
9
  CACHE_DIR = "/tmp/hf_cache"
@@ -21,7 +22,8 @@ os.environ.update({
21
  "TRANSFORMERS_CACHE": f"{CACHE_DIR}/transformers",
22
  "HF_HUB_CACHE": f"{CACHE_DIR}/huggingface_hub",
23
  "HUGGINGFACE_HUB_CACHE": f"{CACHE_DIR}/huggingface_hub",
24
- "HF_HUB_DISABLE_TELEMETRY": "1" # Disable telemetry to reduce rate limiting
 
25
  })
26
 
27
  # Create all cache directories explicitly
@@ -39,6 +41,17 @@ if not os.path.exists(CONFIG_TOML):
39
  with open(CONFIG_TOML, "w") as f:
40
  f.write("[browser]\n")
41
  f.write("gatherUsageStats = false\n")
 
 
 
 
 
 
 
 
 
 
 
42
 
43
  # ──── NOW IMPORT OTHER LIBRARIES ───────────────────────────────────────────────
44
  import json
@@ -52,9 +65,7 @@ from huggingface_hub import hf_hub_download, HfApi
52
 
53
  # ──── MODEL DEFINITION ─────────────────────────────────────────────────────────
54
  MODEL_ID = "RakeshNJ12345/Chest-Radiology"
55
-
56
- # Alternative model access through proxy
57
- PROXY_URL = "https://hf-mirror.com"
58
 
59
  class TwoViewVisionReportModel(nn.Module):
60
  def __init__(self, vit: ViTModel, t5: T5ForConditionalGeneration, tokenizer: T5Tokenizer):
@@ -65,7 +76,7 @@ class TwoViewVisionReportModel(nn.Module):
65
  self.tokenizer = tokenizer
66
  self.t5 = t5
67
 
68
- def generate(self, img: torch.Tensor, max_length: int = 64) -> torch.Tensor:
69
  device = img.device
70
  vf = self.vit(pixel_values=img).pooler_output
71
  pf = self.proj_f(vf).unsqueeze(1)
@@ -95,7 +106,7 @@ class TwoViewVisionReportModel(nn.Module):
95
  )
96
  return out_ids
97
 
98
- # ──── MODEL LOADING WITH PROXY SUPPORT AND ERROR HANDLING ──────────────────────
99
  @st.cache_resource(show_spinner=False)
100
  def load_models():
101
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -108,7 +119,6 @@ def load_models():
108
  ]:
109
  os.makedirs(path, exist_ok=True)
110
 
111
- # Try to download using standard method first
112
  try:
113
  # Download config
114
  cfg_path = hf_hub_download(
@@ -119,20 +129,17 @@ def load_models():
119
  local_files_only=False
120
  )
121
  except Exception as e:
122
- st.error(f"❌ Failed to download model via Hugging Face Hub: {str(e)}")
123
  st.info("⚠️ Trying alternative download method...")
124
-
125
- # Use proxy mirror
126
- cfg_path = f"{CACHE_DIR}/huggingface_hub/config.json"
127
  api = HfApi(endpoint=PROXY_URL)
128
- api.hf_hub_download(
129
  repo_id=MODEL_ID,
130
  filename="config.json",
131
  repo_type="model",
132
  cache_dir=f"{CACHE_DIR}/huggingface_hub",
133
  local_files_only=False
134
  )
135
-
136
  cfg = json.load(open(cfg_path, "r"))
137
 
138
  # Load models with explicit cache directories
@@ -142,8 +149,8 @@ def load_models():
142
  ignore_mismatched_sizes=True,
143
  cache_dir=f"{CACHE_DIR}/transformers"
144
  ).to(device)
145
- except:
146
- # Use proxy if standard download fails
147
  vit = ViTModel.from_pretrained(
148
  "google/vit-base-patch16-224",
149
  ignore_mismatched_sizes=True,
@@ -156,7 +163,8 @@ def load_models():
156
  "t5-base",
157
  cache_dir=f"{CACHE_DIR}/transformers"
158
  ).to(device)
159
- except:
 
160
  t5 = T5ForConditionalGeneration.from_pretrained(
161
  "t5-base",
162
  cache_dir=f"{CACHE_DIR}/transformers",
@@ -168,7 +176,8 @@ def load_models():
168
  MODEL_ID,
169
  cache_dir=f"{CACHE_DIR}/transformers"
170
  )
171
- except:
 
172
  tok = T5Tokenizer.from_pretrained(
173
  MODEL_ID,
174
  cache_dir=f"{CACHE_DIR}/transformers",
@@ -186,8 +195,8 @@ def load_models():
186
  cache_dir=f"{CACHE_DIR}/huggingface_hub",
187
  local_files_only=False
188
  )
189
- except:
190
- # Use proxy mirror for model weights
191
  api = HfApi(endpoint=PROXY_URL)
192
  ckpt_path = api.hf_hub_download(
193
  repo_id=MODEL_ID,
@@ -201,11 +210,12 @@ def load_models():
201
  model.load_state_dict(state)
202
  return device, model, tok
203
 
204
- # ──── APP INTERFACE WITH ERROR HANDLING ───────────────────────────────────────
205
  try:
206
  device, model, tokenizer = load_models()
207
  except Exception as e:
208
  st.error(f"🚨 Critical Error: Failed to load models. {str(e)}")
 
209
  st.stop()
210
 
211
  transform = T.Compose([
@@ -214,7 +224,24 @@ transform = T.Compose([
214
  T.Normalize(mean=0.5, std=0.5),
215
  ])
216
 
217
- st.set_page_config(page_title="Radiology Report Analysis", layout="wide")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
  st.markdown("<h1 style='text-align:center;'>🩺 Radiology Report Analysis</h1>", unsafe_allow_html=True)
219
  st.markdown("<p style='text-align:center;'>Upload a chest X-ray and click Generate Report.</p>", unsafe_allow_html=True)
220
 
@@ -223,9 +250,9 @@ if "img" not in st.session_state:
223
  uploaded = st.file_uploader("πŸ“€ Upload X-ray (PNG/JPG)", type=["png", "jpg", "jpeg"])
224
  if uploaded:
225
  try:
226
- # Validate image
227
  img = Image.open(uploaded).convert("RGB")
228
- img.verify() # Check if image is valid
 
229
  st.session_state.img = uploaded
230
  st.experimental_rerun()
231
  except Exception as e:
@@ -240,8 +267,8 @@ st.image(img, use_column_width=True)
240
 
241
  col1, col2 = st.columns(2)
242
  with col1:
243
- if st.button("▢️ Generate Report", use_container_width=True):
244
- with st.spinner("Analyzing X-ray..."):
245
  try:
246
  px = transform(img).unsqueeze(0).to(device)
247
  out_ids = model.generate(px, max_length=128)
@@ -261,8 +288,8 @@ with col2:
261
  # Add footer with troubleshooting
262
  st.markdown("---")
263
  st.markdown("""
264
- **Troubleshooting Tips:**
265
- - If model download fails, wait 5 minutes and refresh
266
- - Use standard chest X-ray images in PNG or JPG format
267
- - For persistent errors, contact support@example.com
268
  """)
 
3
  # ──── SET ENVIRONMENT VARIABLES BEFORE ANY IMPORTS ──────────────────────────────
4
  import os
5
  import tempfile
6
+ import sys
7
+ import atexit
8
 
9
  # Create dedicated cache directories
10
  CACHE_DIR = "/tmp/hf_cache"
 
22
  "TRANSFORMERS_CACHE": f"{CACHE_DIR}/transformers",
23
  "HF_HUB_CACHE": f"{CACHE_DIR}/huggingface_hub",
24
  "HUGGINGFACE_HUB_CACHE": f"{CACHE_DIR}/huggingface_hub",
25
+ "HF_HUB_DISABLE_TELEMETRY": "1",
26
+ "STREAMLIT_SERVER_ENABLE_FILE_WATCHER": "false"
27
  })
28
 
29
  # Create all cache directories explicitly
 
41
  with open(CONFIG_TOML, "w") as f:
42
  f.write("[browser]\n")
43
  f.write("gatherUsageStats = false\n")
44
+ f.write("[server]\n")
45
+ f.write("fileWatcherType = none\n")
46
+
47
+ # Monkey-patch Streamlit to prevent root directory access
48
+ def safe_makedirs(name, mode=0o777, exist_ok=False):
49
+ """Prevent Streamlit from creating directories outside /tmp"""
50
+ if name.startswith(('/.streamlit', '/root', '/home')):
51
+ name = name.replace('/', '/tmp/', 1)
52
+ return os.makedirs(name, mode, exist_ok)
53
+
54
+ os.makedirs = safe_makedirs
55
 
56
  # ──── NOW IMPORT OTHER LIBRARIES ───────────────────────────────────────────────
57
  import json
 
65
 
66
  # ──── MODEL DEFINITION ─────────────────────────────────────────────────────────
67
  MODEL_ID = "RakeshNJ12345/Chest-Radiology"
68
+ PROXY_URL = "https://hf-mirror.com" # Proxy for Hugging Face downloads
 
 
69
 
70
  class TwoViewVisionReportModel(nn.Module):
71
  def __init__(self, vit: ViTModel, t5: T5ForConditionalGeneration, tokenizer: T5Tokenizer):
 
76
  self.tokenizer = tokenizer
77
  self.t5 = t5
78
 
79
+ def generate(self, img: torch.Tensor, max_length: int = 128) -> torch.Tensor:
80
  device = img.device
81
  vf = self.vit(pixel_values=img).pooler_output
82
  pf = self.proj_f(vf).unsqueeze(1)
 
106
  )
107
  return out_ids
108
 
109
+ # ──── MODEL LOADING WITH ERROR HANDLING ────────────────────────────────────────
110
  @st.cache_resource(show_spinner=False)
111
  def load_models():
112
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
119
  ]:
120
  os.makedirs(path, exist_ok=True)
121
 
 
122
  try:
123
  # Download config
124
  cfg_path = hf_hub_download(
 
129
  local_files_only=False
130
  )
131
  except Exception as e:
132
+ st.error(f"❌ Failed to download model config: {str(e)}")
133
  st.info("⚠️ Trying alternative download method...")
 
 
 
134
  api = HfApi(endpoint=PROXY_URL)
135
+ cfg_path = api.hf_hub_download(
136
  repo_id=MODEL_ID,
137
  filename="config.json",
138
  repo_type="model",
139
  cache_dir=f"{CACHE_DIR}/huggingface_hub",
140
  local_files_only=False
141
  )
142
+
143
  cfg = json.load(open(cfg_path, "r"))
144
 
145
  # Load models with explicit cache directories
 
149
  ignore_mismatched_sizes=True,
150
  cache_dir=f"{CACHE_DIR}/transformers"
151
  ).to(device)
152
+ except Exception as e:
153
+ st.warning(f"⚠️ Standard ViT download failed: {str(e)}")
154
  vit = ViTModel.from_pretrained(
155
  "google/vit-base-patch16-224",
156
  ignore_mismatched_sizes=True,
 
163
  "t5-base",
164
  cache_dir=f"{CACHE_DIR}/transformers"
165
  ).to(device)
166
+ except Exception as e:
167
+ st.warning(f"⚠️ Standard T5 download failed: {str(e)}")
168
  t5 = T5ForConditionalGeneration.from_pretrained(
169
  "t5-base",
170
  cache_dir=f"{CACHE_DIR}/transformers",
 
176
  MODEL_ID,
177
  cache_dir=f"{CACHE_DIR}/transformers"
178
  )
179
+ except Exception as e:
180
+ st.warning(f"⚠️ Standard tokenizer download failed: {str(e)}")
181
  tok = T5Tokenizer.from_pretrained(
182
  MODEL_ID,
183
  cache_dir=f"{CACHE_DIR}/transformers",
 
195
  cache_dir=f"{CACHE_DIR}/huggingface_hub",
196
  local_files_only=False
197
  )
198
+ except Exception as e:
199
+ st.warning(f"⚠️ Standard model weights download failed: {str(e)}")
200
  api = HfApi(endpoint=PROXY_URL)
201
  ckpt_path = api.hf_hub_download(
202
  repo_id=MODEL_ID,
 
210
  model.load_state_dict(state)
211
  return device, model, tok
212
 
213
+ # ──── APP INTERFACE ───────────────────────────────────────────────────────────
214
  try:
215
  device, model, tokenizer = load_models()
216
  except Exception as e:
217
  st.error(f"🚨 Critical Error: Failed to load models. {str(e)}")
218
+ st.info("Please try refreshing the page or contact support@example.com")
219
  st.stop()
220
 
221
  transform = T.Compose([
 
224
  T.Normalize(mean=0.5, std=0.5),
225
  ])
226
 
227
+ st.set_page_config(
228
+ page_title="Radiology Report Analysis",
229
+ layout="wide",
230
+ # Disable Streamlit's default behavior that causes permission issues
231
+ initial_sidebar_state="collapsed"
232
+ )
233
+
234
+ # Custom CSS to hide Streamlit elements that might cause issues
235
+ st.markdown("""
236
+ <style>
237
+ .reportview-container .main .block-container {padding-top: 2rem;}
238
+ header {visibility: hidden;}
239
+ .stDeployButton {display:none;}
240
+ #MainMenu {visibility: hidden;}
241
+ footer {visibility: hidden;}
242
+ </style>
243
+ """, unsafe_allow_html=True)
244
+
245
  st.markdown("<h1 style='text-align:center;'>🩺 Radiology Report Analysis</h1>", unsafe_allow_html=True)
246
  st.markdown("<p style='text-align:center;'>Upload a chest X-ray and click Generate Report.</p>", unsafe_allow_html=True)
247
 
 
250
  uploaded = st.file_uploader("πŸ“€ Upload X-ray (PNG/JPG)", type=["png", "jpg", "jpeg"])
251
  if uploaded:
252
  try:
 
253
  img = Image.open(uploaded).convert("RGB")
254
+ # Quick verification by thumbnail generation
255
+ img.thumbnail((10, 10))
256
  st.session_state.img = uploaded
257
  st.experimental_rerun()
258
  except Exception as e:
 
267
 
268
  col1, col2 = st.columns(2)
269
  with col1:
270
+ if st.button("▢️ Generate Report", use_container_width=True, type="primary"):
271
+ with st.spinner("Analyzing X-ray. This may take 10-20 seconds..."):
272
  try:
273
  px = transform(img).unsqueeze(0).to(device)
274
  out_ids = model.generate(px, max_length=128)
 
288
  # Add footer with troubleshooting
289
  st.markdown("---")
290
  st.markdown("""
291
+ **Note:**
292
+ - First-time model loading may take 1-2 minutes
293
+ - For optimal results, use clear chest X-ray images
294
+ - Contact support@example.com for assistance
295
  """)