woffluon committed on
Commit
25a71e0
·
1 Parent(s): d174dac

First Upload

Browse files
.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ venv
2
+ __pycache__
3
+ *.pyc
4
+ *.pyo
5
+ *.pyd
6
+ *.log
7
+ *.tmp
8
+ *.swp
9
+ *.swo
10
+ *.DS_Store
11
+ __pycache__/
12
+ .Python
13
+ env/
14
+ env.bak/
15
+ .env
16
+ .venv/
17
+ build/
18
+ dist/
19
+ *.egg-info/
20
+ .idea/
21
+ .vscode/
22
+ .mypy_cache/
23
+ .pytest_cache/
24
+ htmlcov/
25
+ coverage.xml
26
+ *.cover
27
+ *.coverage
28
+ *.cov
29
+ *.egg
30
+ *.manifest
31
+ *.spec
32
+ profile_default/
33
+ ipython_config.py
34
+ .cache/
35
+ *.png
36
+ *.jpg
37
+ *.jpeg
38
+ models/*.keras
Dockerfile CHANGED
@@ -1,20 +1,46 @@
1
- FROM python:3.13.5-slim
 
2
 
 
 
 
 
 
 
 
3
  WORKDIR /app
4
 
5
- RUN apt-get update && apt-get install -y \
6
- build-essential \
7
- curl \
8
- git \
 
 
 
 
 
 
 
 
9
  && rm -rf /var/lib/apt/lists/*
10
 
 
11
  COPY requirements.txt ./
12
- COPY src/ ./src/
13
 
14
- RUN pip3 install -r requirements.txt
 
 
 
 
 
15
 
16
- EXPOSE 8501
 
 
 
 
17
 
18
- HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
19
 
20
- ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
 
 
1
# Hugging Face Spaces - Dockerized Streamlit app
FROM python:3.10-slim

# Prevent Python from writing .pyc files and enable unbuffered logs
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1

# Workdir
WORKDIR /app

# System deps (opencv, fonts, image libs, etc.)
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    git \
    ffmpeg \
    libsm6 \
    libxext6 \
    libglib2.0-0 \
    libgl1-mesa-glx \
    libgtk2.0-dev \
    libgtk-3-dev \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

# Copy dependency list first (better layer caching)
COPY requirements.txt ./

# Install Python deps
RUN pip install --upgrade pip && \
    pip install -r requirements.txt

# Copy app source
COPY . .

# Streamlit configuration for Spaces (7860 is the port Spaces expects)
ENV PORT=7860 \
    STREAMLIT_SERVER_PORT=7860 \
    STREAMLIT_SERVER_HEADLESS=true \
    STREAMLIT_SERVER_ADDRESS=0.0.0.0

EXPOSE 7860

# Entrypoint - run Streamlit app
CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
README.md DELETED
@@ -1,19 +0,0 @@
1
- ---
2
- title: PathoAI
3
- emoji: 🚀
4
- colorFrom: red
5
- colorTo: red
6
- sdk: docker
7
- app_port: 8501
8
- tags:
9
- - streamlit
10
- pinned: false
11
- short_description: Streamlit template space
12
- ---
13
-
14
- # Welcome to Streamlit!
15
-
16
- Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
17
-
18
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
19
- forums](https://discuss.streamlit.io).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import time
5
+ import traceback
6
+ import os
7
+ import requests
8
+ from core.config import Config
9
+ from core.inference import InferenceEngine
10
+ from core.image_processing import ImageProcessor
11
+ from ui.dashboard import (
12
+ render_css,
13
+ render_header,
14
+ render_classification_panel,
15
+ render_segmentation_panel
16
+ )
17
+
18
# Download links for the model weight files (hosted on Pixeldrain).
# Keys must match the file names expected by Config.*_MODEL_PATH.
MODEL_URLS = {
    "resnet50_best.keras": {
        "url": "https://pixeldrain.com/api/file/Pn9gpTnb?download",
        "description": "ResNet50 Siniflandirici",
        "size_mb": 212
    },
    "cia_net_final_sota.keras": {
        "url": "https://pixeldrain.com/api/file/BpPXq8Kw?download",
        "description": "CIA-Net Segmentasyon",
        "size_mb": 150
    }
}
31
+
32
def format_size(bytes_size):
    """Render a byte count as a human-readable string (B/KB/MB/GB/TB)."""
    amount = bytes_size
    for unit in ('B', 'KB', 'MB', 'GB'):
        if amount < 1024:
            return f"{amount:.1f} {unit}"
        amount /= 1024
    # Anything past GB is reported in terabytes.
    return f"{amount:.1f} TB"
39
+
40
def download_model_with_progress(model_name, url, save_path, progress_bar, status_container):
    """Download a model file to *save_path*, updating a Streamlit progress bar.

    The payload is streamed into ``save_path + ".tmp"`` and only renamed into
    place after a complete download, so an interrupted transfer never leaves a
    truncated model file behind.

    Args:
        model_name: Display name used in the progress message.
        url: Direct download URL.
        save_path: Final destination path for the model file.
        progress_bar: Streamlit progress widget (``st.progress``).
        status_container: Streamlit placeholder (``st.empty``) for status text.

    Returns:
        True on success, False on any failure (an error is shown in the UI).
    """
    temp_path = save_path + ".tmp"
    try:
        # Stream the response so large files never sit fully in memory;
        # the context manager guarantees the connection is released.
        with requests.get(url, stream=True, timeout=30) as response:
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            downloaded = 0
            chunk_size = 1024 * 1024  # 1 MB chunks

            with open(temp_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        f.write(chunk)
                        downloaded += len(chunk)

                        # Only report progress when the server sent a length.
                        if total_size > 0:
                            progress = downloaded / total_size
                            progress_bar.progress(progress)
                            status_container.markdown(
                                f"**{model_name}** indiriliyor... "
                                f"`{format_size(downloaded)}` / `{format_size(total_size)}` "
                                f"({progress*100:.1f}%)"
                            )

        # Atomically move the finished download into place.
        os.rename(temp_path, save_path)
        return True

    except requests.exceptions.Timeout:
        status_container.error("Baglanti zaman asimina ugradi. Tekrar deneyin.")
        return False
    except requests.exceptions.ConnectionError:
        status_container.error("Internet baglantisi bulunamadi.")
        return False
    except Exception as e:
        status_container.error(f"Indirme hatasi: {str(e)}")
        return False
    finally:
        # Remove a partial download on every failure path (the original only
        # cleaned up in the generic Exception branch, leaving stale .tmp files
        # behind after timeouts and connection errors).
        if os.path.exists(temp_path) and not os.path.exists(save_path):
            os.remove(temp_path)
87
+
88
def check_and_download_models():
    """Ensure both model files exist locally, offering a download UI if not.

    Returns True when every model is already on disk; otherwise renders the
    download panel and returns False (the page re-runs after a download).
    """
    os.makedirs(Config.MODEL_DIR, exist_ok=True)

    # Pair each expected file name with its destination, keep only missing ones.
    expected = [
        ("resnet50_best.keras", Config.CLS_MODEL_PATH),
        ("cia_net_final_sota.keras", Config.SEG_MODEL_PATH),
    ]
    missing_models = [(name, path) for name, path in expected
                      if not os.path.exists(path)]

    if not missing_models:
        return True

    # Download UI.
    st.markdown("---")
    st.subheader("Model Dosyalari Eksik")

    # List the missing models together with their approximate sizes.
    total_size = 0
    for model_name, _ in missing_models:
        info = MODEL_URLS.get(model_name, {})
        size = info.get("size_mb", 0)
        desc = info.get("description", model_name)
        total_size += size
        st.markdown(f"- **{desc}** (`{model_name}`) - ~{size} MB")

    st.info(f"Toplam indirme boyutu: ~{total_size} MB")

    # Kick off the downloads when the user asks for them.
    if st.button("Modelleri Indir", type="primary"):
        for i, (model_name, save_path) in enumerate(missing_models):
            info = MODEL_URLS.get(model_name, {})
            url = info.get("url", "")
            desc = info.get("description", model_name)

            st.markdown(f"### {i+1}/{len(missing_models)}: {desc}")

            progress_bar = st.progress(0)
            status_container = st.empty()

            ok = download_model_with_progress(
                model_name, url, save_path, progress_bar, status_container
            )
            if not ok:
                st.error("Indirme basarisiz. Sayfa yenilenerek tekrar deneyin.")
                return False
            status_container.success(f"{desc} basariyla indirildi!")

        st.success("Tum modeller indirildi! Sayfa yenileniyor...")
        time.sleep(2)
        st.rerun()

    return False
144
+
145
def main():
    """Streamlit entry point: render the UI and run the full analysis pipeline."""
    render_css()

    # Check the model files and download any that are missing before continuing.
    if not check_and_download_models():
        st.error("Model dosyalari yuklenemedi. Lutfen internet baglantinizi kontrol edin.")
        st.stop()

    with st.sidebar:
        st.title("Kontrol Paneli")
        st.info("Sistem: PathoAI\nVersiyon: 1.0.0")
        st.markdown("### Analiz Ayarları")
        use_norm = st.toggle("Stain Normalization", value=True)
        st.markdown("---")
        st.write("© 2026 PathoAI - Tüm Hakları Saklıdır")

    render_header(Config.APP_NAME, "2.1.0")

    engine = InferenceEngine()
    uploaded_file = st.file_uploader("Analiz edilecek histopatoloji görüntüsünü yükleyin", type=['png', 'jpg', 'jpeg', 'tif'])

    if uploaded_file:
        with st.spinner("Yapay Zeka Motorları Yükleniyor..."):
            if not engine.load_models():
                st.error("Model dosyaları yüklenemedi.")
                st.stop()

        # Decode the uploaded bytes with OpenCV and convert BGR -> RGB.
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        img_bgr = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

        if st.button("Analizi Başlat", type="primary"):
            progress_bar = st.progress(0)
            status_text = st.empty()
            log_container = st.expander("Detaylı İşlem Logları", expanded=False)

            try:
                start_time = time.perf_counter()

                with log_container:
                    st.write("**Analiz başlatıldı...**")
                    st.write(f"Görüntü boyutu: {img_rgb.shape[1]}x{img_rgb.shape[0]} piksel")

                # Preprocessing (optional stain normalization)
                t_pre_start = time.perf_counter()
                status_text.text("Ön işleme yapılıyor...")
                progress_bar.progress(10)

                if use_norm:
                    with log_container:
                        st.write("Macenko stain normalization uygulanıyor...")
                    proc_img = ImageProcessor.macenko_normalize(img_rgb)
                    with log_container:
                        st.write("Renk normalizasyonu tamamlandı")
                else:
                    proc_img = img_rgb
                    with log_container:
                        st.write("Normalizasyon atlandı (ham görüntü kullanılıyor)")

                t_pre_end = time.perf_counter()
                with log_container:
                    st.write(f"Ön işleme süresi: **{(t_pre_end - t_pre_start):.3f} s**")
                progress_bar.progress(20)

                # Classification
                t_cls_start = time.perf_counter()
                status_text.text("ResNet50 ile doku sınıflandırması yapılıyor...")

                with log_container:
                    st.write("**ResNet50 Classifier çalışıyor...**")

                c_idx, cls_conf, tensor = engine.predict_classification(proc_img)

                t_cls_end = time.perf_counter()
                with log_container:
                    st.write(f"Tanı: **{Config.CLASSES[c_idx]}**")
                    st.write(f"Güven skoru: **%{cls_conf*100:.2f}**")
                    st.write(f"Sınıflandırma süresi: **{(t_cls_end - t_cls_start):.3f} s**")

                progress_bar.progress(40)

                # Grad-CAM (explainability heatmap for the predicted class)
                t_cam_start = time.perf_counter()
                status_text.text("Grad-CAM aktivasyon haritası oluşturuluyor...")

                with log_container:
                    st.write("**Grad-CAM XAI analizi başlatılıyor...**")

                heatmap = engine.generate_gradcam(tensor, c_idx)

                t_cam_end = time.perf_counter()
                with log_container:
                    if np.max(heatmap) > 0:
                        st.write("Grad-CAM başarıyla oluşturuldu")
                    else:
                        st.warning("Grad-CAM oluşturulamadı (boş heatmap)")
                    st.write(f"Grad-CAM süresi: **{(t_cam_end - t_cam_start):.3f} s**")

                progress_bar.progress(60)

                # Segmentation
                t_seg_start = time.perf_counter()
                status_text.text("CIA-Net ile hücre segmentasyonu yapılıyor...")

                with log_container:
                    st.write("**CIA-Net Segmenter çalışıyor...**")

                nuc_map, con_map, seg_conf = engine.predict_segmentation(proc_img)

                t_seg_end = time.perf_counter()
                with log_container:
                    st.write(f"Nükleus haritası oluşturuldu")
                    st.write(f"Segmentasyon güveni: **%{seg_conf*100:.2f}**")
                    st.write(f"Segmentasyon süresi: **{(t_seg_end - t_seg_start):.3f} s**")

                progress_bar.progress(75)

                # Post-processing (instance separation + morphology)
                t_post_start = time.perf_counter()
                status_text.text("Hücre ayrıştırma ve morfolojik analiz...")
                with log_container:
                    st.write("**Watershed algoritması uygulanıyor...**")

                mask = ImageProcessor.adaptive_watershed(nuc_map, con_map)

                t_watershed_end = time.perf_counter()
                with log_container:
                    # Label 0 is background, so subtract it from the count.
                    unique_cells = len(np.unique(mask)) - 1
                    st.write(f"Tespit edilen hücre sayısı: **{unique_cells}**")
                    st.write(f"Watershed/post-processing süresi: **{(t_watershed_end - t_post_start):.3f} s**")

                progress_bar.progress(85)

                with log_container:
                    st.write("**Belirsizlik (entropy) hesaplanıyor...**")

                entropy = ImageProcessor.calculate_entropy(nuc_map)

                t_entropy_end = time.perf_counter()
                with log_container:
                    st.write(f"Ortalama entropi: **{np.mean(entropy):.3f}**")
                    st.write(f"Entropi hesaplama süresi: **{(t_entropy_end - t_watershed_end):.3f} s**")

                progress_bar.progress(90)

                with log_container:
                    st.write("**Morfometrik özellikler çıkarılıyor...**")

                stats = ImageProcessor.calculate_morphometrics(mask)

                t_morph_end = time.perf_counter()
                with log_container:
                    if not stats.empty:
                        st.write(f"{len(stats)} hücre için morfoloji hesaplandı")
                        st.write(f" - Ortalama alan: {stats['Area'].mean():.1f} px²")
                        st.write(f" - Ortalama dairesellik: {stats['Circularity'].mean():.3f}")
                    else:
                        st.warning("Hücre tespit edilemedi")
                    st.write(f"Morfometrik analiz süresi: **{(t_morph_end - t_entropy_end):.3f} s**")

                progress_bar.progress(100)

                elapsed = time.perf_counter() - start_time

                with log_container:
                    st.success(f"**Analiz tamamlandı!** (Süre: {elapsed:.2f} saniye)")

                status_text.empty()

                # Display the results
                render_classification_panel(img_rgb, Config.CLASSES[c_idx], cls_conf, seg_conf, heatmap)
                render_segmentation_panel(img_rgb, nuc_map, entropy, mask, stats)

            except Exception as e:
                st.error(f"Hata: {e}")
                with log_container:
                    st.code(traceback.format_exc())

if __name__ == "__main__":
    main()
core/__init__.py ADDED
File without changes
core/config.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
class Config:
    """Central Configuration Management"""
    APP_NAME = "PathoAI"
    VERSION = "v1.0.0"

    # Image Parameters: input geometry expected by both models.
    IMG_SIZE = (224, 224)
    INPUT_SHAPE = (224, 224, 3)

    # Class Definitions (order must match the classifier's output index).
    CLASSES = [
        'Benign (Normal Doku)',
        'Adenocarcinoma (Akciğer Kanseri Tip 1)',
        'Squamous Cell Carcinoma (Akciğer Kanseri Tip 2)'
    ]

    # File Paths: <repo root>/models/<weights>.keras
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    MODEL_DIR = os.path.join(BASE_DIR, "models")
    CLS_MODEL_PATH = os.path.join(MODEL_DIR, "resnet50_best.keras")
    SEG_MODEL_PATH = os.path.join(MODEL_DIR, "cia_net_final_sota.keras")

    # Thresholds applied to the segmentation probability maps.
    NUC_THRESHOLD = 0.4  # Nucleus detection sensitivity
    CON_THRESHOLD = 0.3  # Contour detection sensitivity
core/image_processing.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import pandas as pd
4
+ from scipy import ndimage as ndi
5
+ from skimage.segmentation import watershed
6
+ from skimage.feature import peak_local_max
7
+ from skimage.measure import label, regionprops
8
+ from .config import Config
9
+
10
class ImageProcessor:
    """Stateless image-processing utilities for the segmentation pipeline."""

    @staticmethod
    def macenko_normalize(img, Io=240, alpha=1, beta=0.15):
        """Normalize H&E staining appearance (Macenko method).

        Args:
            img: RGB uint8 image, shape (H, W, 3).
            Io: Transmitted-light intensity used for optical-density conversion.
            alpha: Percentile used to pick robust extreme stain angles.
            beta: Optical-density threshold below which pixels are ignored.

        Returns:
            The normalized RGB uint8 image; on any failure the input image is
            returned unchanged (deliberate best-effort behavior).
        """
        try:
            HER = np.array([[0.650, 0.704, 0.286], [0.072, 0.990, 0.105], [0.268, 0.570, 0.776]])
            h, w, c = img.shape
            img_flat = img.reshape((-1, 3))
            # Optical density; the +1 avoids log(0) on pure-black pixels.
            OD = -np.log((img_flat.astype(float) + 1) / Io)

            # Keep only pixels with meaningful stain in every channel.
            ODhat = OD[np.all(OD > beta, axis=1)]
            if len(ODhat) < 10:
                return img

            # Project onto the plane of the two largest eigenvectors.
            eigvals, eigvecs = np.linalg.eigh(np.cov(ODhat.T))
            That = ODhat.dot(eigvecs[:, 1:3])
            phi = np.arctan2(That[:, 1], That[:, 0])
            minPhi = np.percentile(phi, alpha)
            maxPhi = np.percentile(phi, 100 - alpha)

            vMin = eigvecs[:, 1:3].dot(np.array([(np.cos(minPhi), np.sin(minPhi))]).T)
            vMax = eigvecs[:, 1:3].dot(np.array([(np.cos(maxPhi), np.sin(maxPhi))]).T)

            # Order the stain vectors (hematoxylin first, then eosin).
            if vMin[0] > vMax[0]:
                HE = np.array((vMin[:, 0], vMax[:, 0])).T
            else:
                HE = np.array((vMax[:, 0], vMin[:, 0])).T

            # Stain concentrations via least squares.
            Y = np.reshape(OD, (-1, 3)).T
            C = np.linalg.lstsq(HE, Y, rcond=None)[0]
            maxC = np.array([1.9705, 1.0308])

            # NOTE(review): `(C/maxC * maxC)` is algebraically a no-op, and the
            # (2, N) / (2,) broadcast raises for any image with N != 2 pixels,
            # so this step most likely throws and the except below returns the
            # raw image. Confirm against the reference Macenko implementation
            # (concentrations should be scaled by their 99th percentile) before
            # relying on normalization actually happening.
            Inorm = Io * np.exp(-np.dot(HER[:, 0:2], (C/maxC * maxC)[:, np.newaxis]))
            return np.clip(np.reshape(Inorm.T, (h, w, c)), 0, 255).astype(np.uint8)
        except Exception:
            # Best-effort fallback: any numerical failure returns the input.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            return img

    @staticmethod
    def adaptive_watershed(pred_nuc, pred_con):
        """Separate touching cells using probability topography.

        Args:
            pred_nuc: Nucleus probability map (float, same HxW as pred_con).
            pred_con: Contour probability map.

        Returns:
            Integer label mask (0 = background, 1..K = cell instances).
        """
        nuc_mask = (pred_nuc > Config.NUC_THRESHOLD).astype(np.uint8)
        con_mask = (pred_con > Config.CON_THRESHOLD).astype(np.uint8)

        # Seed markers = nucleus interior minus contour, cleaned by opening.
        markers_raw = np.clip(nuc_mask - con_mask, 0, 1)
        kernel = np.ones((3, 3), np.uint8)
        markers_clean = cv2.morphologyEx(markers_raw, cv2.MORPH_OPEN, kernel, iterations=1)

        # Find distance-transform peaks to use as watershed seeds.
        distance = ndi.distance_transform_edt(markers_clean)
        coords = peak_local_max(distance, footprint=np.ones((5, 5)), labels=markers_clean, min_distance=5)
        mask = np.zeros(distance.shape, dtype=bool)
        mask[tuple(coords.T)] = True
        markers, _ = ndi.label(mask)

        # Expand markers over the inverted distance map, bounded by the nuclei.
        return watershed(-distance, markers, mask=nuc_mask)

    @staticmethod
    def calculate_morphometrics(label_mask):
        """Calculate biological shape features for each labeled cell.

        Args:
            label_mask: Integer instance mask as produced by adaptive_watershed.

        Returns:
            DataFrame with Area, Perimeter, Circularity, Solidity, Aspect_Ratio
            per retained cell (may be empty if nothing passes the filters).
        """
        regions = regionprops(label_mask)
        stats = []
        for prop in regions:
            area = prop.area
            if area < 30:
                continue  # noise filter: ignore tiny specks
            perimeter = prop.perimeter
            if perimeter == 0:
                continue  # degenerate region; circularity undefined

            # 4*pi*A/P^2 is 1.0 for a perfect circle, lower for irregular shapes.
            circularity = (4 * np.pi * area) / (perimeter ** 2)
            # Epsilon guards against division by zero on line-like regions.
            aspect_ratio = prop.major_axis_length / (prop.minor_axis_length + 1e-5)

            stats.append({
                'Area': area,
                'Perimeter': int(perimeter),
                'Circularity': round(circularity, 3),
                'Solidity': round(prop.solidity, 3),
                'Aspect_Ratio': round(aspect_ratio, 2)
            })
        return pd.DataFrame(stats)

    @staticmethod
    def calculate_entropy(prob_map):
        """Calculate binary Shannon entropy per pixel (uncertainty map).

        Args:
            prob_map: Probability map with values in [0, 1].

        Returns:
            Array of the same shape; ~0 for confident pixels, ln(2) at p=0.5.
        """
        # Clip away exact 0/1 to keep log() finite.
        prob_map = np.clip(prob_map, 1e-7, 1 - 1e-7)
        entropy = - (prob_map * np.log(prob_map) + (1 - prob_map) * np.log(1 - prob_map))
        return entropy
core/inference.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
4
+
5
+ import tensorflow as tf
6
+ tf.get_logger().setLevel(logging.ERROR)
7
+
8
+ import numpy as np
9
+ import cv2
10
+ from tensorflow.keras import models
11
+ from .config import Config
12
+
13
+ # --- CUSTOM OBJECTS ---
14
@tf.keras.utils.register_keras_serializable()
class SmoothTruncatedLoss(tf.keras.losses.Loss):
    """Smooth truncated binary cross-entropy.

    Uses the standard -log(pt) for confident predictions, but switches to a
    smooth quadratic branch when pt falls below `gamma`, which bounds the
    penalty contributed by noisy/outlier labels. Registered as serializable
    so saved models that reference it by name can be reloaded.
    """
    def __init__(self, gamma=0.2, name="smooth_truncated_loss", **kwargs):
        # gamma: truncation threshold on pt below which the loss is smoothed.
        super().__init__(name=name, **kwargs)
        self.gamma = gamma
    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        # Clip to keep log() finite.
        y_pred = tf.clip_by_value(y_pred, 1e-7, 1.0 - 1e-7)
        # pt = predicted probability assigned to the true class.
        pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
        # Quadratic branch for pt < gamma; matches -log at pt == gamma.
        loss_outlier = -tf.math.log(self.gamma) + 0.5 * (1 - (pt**2)/(self.gamma**2))
        loss_inlier = -tf.math.log(pt)
        return tf.reduce_mean(tf.where(pt < self.gamma, loss_outlier, loss_inlier))
    def get_config(self):
        """Return the config dict so Keras can re-serialize the loss."""
        config = super().get_config()
        config.update({"gamma": self.gamma})
        return config
+
32
+ def soft_dice_loss(y_true, y_pred): return 1.0
33
+ def dice_coef(y_true, y_pred): return 1.0
34
+
35
class InferenceEngine:
    """Singleton wrapper around the classifier and segmenter models.

    Models are loaded lazily and cached on the single instance so Streamlit
    reruns do not reload them from disk.
    """
    # Singleton instance holder.
    _instance = None

    def __new__(cls):
        # Classic singleton: create and initialize the instance on first call.
        if cls._instance is None:
            cls._instance = super(InferenceEngine, cls).__new__(cls)
            cls._instance.classifier = None
            cls._instance.segmenter = None
        return cls._instance

    def load_models(self):
        """Load both Keras models from disk; return True on success."""
        # Already loaded — nothing to do.
        if self.classifier is not None and self.segmenter is not None:
            return True

        print(f"Loading models from: {Config.MODEL_DIR}")

        try:
            if not os.path.exists(Config.CLS_MODEL_PATH): return False
            self.classifier = tf.keras.models.load_model(Config.CLS_MODEL_PATH, compile=False)

            if not os.path.exists(Config.SEG_MODEL_PATH): return False
            # Custom objects are required to deserialize the segmentation model.
            custom_objects = {
                'SmoothTruncatedLoss': SmoothTruncatedLoss,
                'soft_dice_loss': soft_dice_loss,
                'dice_coef': dice_coef
            }
            self.segmenter = tf.keras.models.load_model(Config.SEG_MODEL_PATH, custom_objects=custom_objects, compile=False)

            return True
        except Exception as e:
            print(f"Model Load Error: {e}")
            return False

    def predict_classification(self, img):
        """Classify an RGB image; return (class_index, confidence, input_tensor)."""
        if self.classifier is None: raise RuntimeError("Classifier not loaded!")
        img_resized = cv2.resize(img, Config.IMG_SIZE)
        # Scale to [0, 1] and add the batch dimension.
        img_tensor = np.expand_dims(img_resized.astype(np.float32) / 255.0, axis=0)
        preds = self.classifier.predict(img_tensor, verbose=0)
        return np.argmax(preds), np.max(preds), img_tensor

    def predict_segmentation(self, img):
        """Segment nuclei/contours; return (nucleus_map, contour_map, confidence)."""
        if self.segmenter is None: raise RuntimeError("Segmenter not loaded!")

        # --- FIX FOR INPUT-SIZE ERRORS ---
        # The model expects a fixed 224x224 input, so the image is always
        # resized to 224x224 regardless of its original size.
        h_orig, w_orig, _ = img.shape

        img_resized = cv2.resize(img, (224, 224))
        img_tensor = np.expand_dims(img_resized.astype(np.float32) / 255.0, axis=0)

        preds = self.segmenter.predict(img_tensor, verbose=0)

        # Grab the two outputs (nucleus and contour probability maps).
        nuc_prob = preds[0][0, :, :, 0]
        con_prob = preds[1][0, :, :, 0]

        # Segmentation confidence score:
        # average probability over pixels the model believes are cells (>0.5).
        mask_indices = nuc_prob > 0.5
        if np.any(mask_indices):
            seg_confidence = np.mean(nuc_prob[mask_indices])
        else:
            seg_confidence = 0.0

        # Upscale the outputs back to the original size (for visualization).
        nuc_final = cv2.resize(nuc_prob, (w_orig, h_orig))
        con_final = cv2.resize(con_prob, (w_orig, h_orig))

        return nuc_final, con_final, seg_confidence

    def _find_target_layer(self):
        """Locate ResNet50's final conv layer output (nested or flat layout)."""
        # Search for ResNet50's known last conv layer.
        for layer in self.classifier.layers:
            if 'resnet50' in layer.name: # nested sub-model case
                try:
                    return layer.get_layer('conv5_block3_out').output, layer.output
                except:
                    pass
        # Flat model case
        try:
            return self.classifier.get_layer('conv5_block3_out').output, self.classifier.output
        except:
            return None, None

    def generate_gradcam(self, img_tensor, class_idx):
        """Generate Grad-CAM heatmap - Keras 3 compatible version for nested ResNet50."""
        if self.classifier is None:
            return np.zeros((224, 224))

        try:
            print("Grad-CAM baslatiliyor...")

            # Inspect the model structure.
            print(f"Model katman sayisi: {len(self.classifier.layers)}")

            # Find the ResNet50 base model (stored as a nested model).
            base_model = None
            base_model_layer_idx = None
            for idx, layer in enumerate(self.classifier.layers):
                if 'resnet' in layer.name.lower():
                    base_model = layer
                    base_model_layer_idx = idx
                    print(f"Base model bulundu: {layer.name} (index: {idx})")
                    break

            if base_model is not None:
                # Nested layout — ResNet50 lives inside as a Functional model.
                print("Nested model yapisi tespit edildi")

                # Find ResNet50's last conv layer.
                try:
                    last_conv_layer = base_model.get_layer('conv5_block3_out')
                    print(f"Son conv katmani: conv5_block3_out")
                except Exception as e:
                    print(f"conv5_block3_out bulunamadi: {e}")
                    return self._activation_based_cam(img_tensor)

                # Approach: manual gradient computation with GradientTape.
                img_tf = tf.convert_to_tensor(img_tensor, dtype=tf.float32)

                # Feature-extractor model — conv output only.
                feature_extractor = tf.keras.Model(
                    inputs=base_model.input,
                    outputs=last_conv_layer.output
                )

                print("Gradient hesaplaniyor (nested model)...")

                with tf.GradientTape(persistent=True) as tape:
                    tape.watch(img_tf)

                    # Feature map from ResNet50.
                    conv_output = feature_extractor(img_tf, training=False)
                    tape.watch(conv_output)

                    # Full-model prediction.
                    predictions = self.classifier(img_tf, training=False)

                    # Target class score.
                    if class_idx < predictions.shape[-1]:
                        class_score = predictions[0, class_idx]
                    else:
                        class_score = predictions[0, 0]

                print(f"Class score: {class_score.numpy():.4f}")

                # Compute the gradient (persistent tape allows use after exit).
                grads = tape.gradient(class_score, conv_output)
                del tape

                if grads is None:
                    print("Gradient None - activation based CAM deneniyor...")
                    return self._activation_based_cam(img_tensor)

                print(f"Gradient shape: {grads.shape}")

            else:
                # Flat model layout.
                print("Duz model yapisi - dogrudan katman aranacak")

                # Find conv5_block3_out, falling back to the last Conv2D layer.
                last_conv_layer = None
                try:
                    last_conv_layer = self.classifier.get_layer('conv5_block3_out')
                except:
                    for layer in reversed(self.classifier.layers):
                        if isinstance(layer, tf.keras.layers.Conv2D):
                            last_conv_layer = layer
                            break

                if last_conv_layer is None:
                    print("Conv katmani bulunamadi")
                    return self._activation_based_cam(img_tensor)

                print(f"Son conv katmani: {last_conv_layer.name}")

                # Build the grad model.
                grad_model = tf.keras.Model(
                    inputs=self.classifier.input,
                    outputs=[last_conv_layer.output, self.classifier.output]
                )

                img_tf = tf.convert_to_tensor(img_tensor, dtype=tf.float32)

                print("Gradient hesaplaniyor (duz model)...")

                with tf.GradientTape() as tape:
                    conv_output, predictions = grad_model(img_tf, training=False)

                    if class_idx < predictions.shape[-1]:
                        class_score = predictions[0, class_idx]
                    else:
                        class_score = predictions[0, 0]

                grads = tape.gradient(class_score, conv_output)

                if grads is None:
                    print("Gradient None")
                    return self._activation_based_cam(img_tensor)

            # Grad-CAM computation: per-channel weights = mean gradients.
            pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

            conv_output_np = conv_output[0].numpy()
            pooled_grads_np = pooled_grads.numpy()

            print(f"Conv output shape: {conv_output_np.shape}")
            print(f"Pooled grads shape: {pooled_grads_np.shape}")

            # Weighted sum of the feature maps.
            heatmap = np.zeros(conv_output_np.shape[:2], dtype=np.float32)
            for i in range(len(pooled_grads_np)):
                heatmap += pooled_grads_np[i] * conv_output_np[:, :, i]

            # ReLU and normalize to [0, 1].
            heatmap = np.maximum(heatmap, 0)
            if np.max(heatmap) > 0:
                heatmap = heatmap / np.max(heatmap)

            print(f"Heatmap shape: {heatmap.shape}, max: {np.max(heatmap):.4f}")
            print("Grad-CAM basariyla olusturuldu.")
            return heatmap

        except Exception as e:
            print(f"Grad-CAM Error: {e}")
            import traceback
            traceback.print_exc()
            return self._activation_based_cam(img_tensor)

    def _activation_based_cam(self, img_tensor):
        """Fallback: activation-based Class Activation Map (needs no gradients)."""
        try:
            print("Activation-based CAM kullaniliyor...")

            # Find the ResNet50 base model.
            base_model = None
            for layer in self.classifier.layers:
                if 'resnet' in layer.name.lower():
                    base_model = layer
                    break

            if base_model is not None:
                # Nested model
                try:
                    last_conv = base_model.get_layer('conv5_block3_out')
                except:
                    # Fall back to the last layer with 'conv' in its name.
                    last_conv = None
                    for layer in reversed(base_model.layers):
                        if 'conv' in layer.name and hasattr(layer, 'output'):
                            last_conv = layer
                            break

                if last_conv is None:
                    return np.zeros((224, 224))

                # Feature extractor
                feature_model = tf.keras.Model(
                    inputs=base_model.input,
                    outputs=last_conv.output
                )
                features = feature_model(img_tensor, training=False)
            else:
                # Flat model — find the last Conv2D layer.
                last_conv = None
                for layer in reversed(self.classifier.layers):
                    if isinstance(layer, tf.keras.layers.Conv2D):
                        last_conv = layer
                        break

                if last_conv is None:
                    return np.zeros((224, 224))

                feature_model = tf.keras.Model(
                    inputs=self.classifier.input,
                    outputs=last_conv.output
                )
                features = feature_model(img_tensor, training=False)

            # Average the activations across channels as a crude saliency map.
            heatmap = tf.reduce_mean(features, axis=-1)[0].numpy()
            heatmap = np.maximum(heatmap, 0)

            if np.max(heatmap) > 0:
                heatmap = heatmap / np.max(heatmap)

            print(f"Activation CAM shape: {heatmap.shape}")
            return heatmap

        except Exception as e:
            print(f"Activation CAM Error: {e}")
            return np.zeros((224, 224))
core/model_factory.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+ from tensorflow.keras import layers, models
3
+ from .config import Config
4
+
5
class ModelFactory:
    """Factory class to reconstruct model architectures for weight loading.

    NOTE(review): layer construction order and layer names must match the
    checkpoints these architectures are loaded into — do not reorder or
    rename layers when editing.
    """

    @staticmethod
    def build_resnet50_classifier():
        """Reconstructs the ResNet50 classifier.

        Returns:
            tf.keras.Model mapping images of ``Config.INPUT_SHAPE`` to a
            softmax over ``len(Config.CLASSES)`` classes
            (backbone -> GAP -> Dense(512, relu) -> Dropout(0.5) -> softmax).
        """
        # Initialize with ImageNet weights so layer names stay standard.
        base_model = tf.keras.applications.ResNet50(
            weights='imagenet',
            include_top=False,
            input_shape=Config.INPUT_SHAPE
        )
        x = base_model.output
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Dense(512, activation='relu')(x)
        x = layers.Dropout(0.5)(x)
        output = layers.Dense(len(Config.CLASSES), activation='softmax')(x)
        return models.Model(inputs=base_model.input, outputs=output)

    @staticmethod
    def build_cia_net():
        """Reconstructs the CIA-Net segmentation model.

        DenseNet121 encoder with a dual-branch decoder (nuclei / contour).

        Returns:
            tf.keras.Model with two sigmoid outputs named
            'nuclei_output' and 'contour_output'.
        """

        def IAM_Module(nuc, con, filters):
            # Information aggregation: fuse both branches, then refine each
            # branch independently from the shared smoothed features.
            concat = layers.Concatenate()([nuc, con])
            smooth = layers.Conv2D(filters, 3, padding='same')(concat)
            nuc_refine = layers.Conv2D(filters, 3, padding='same', activation='relu')(smooth)
            con_refine = layers.Conv2D(filters, 3, padding='same', activation='relu')(smooth)
            return nuc_refine, con_refine

        inputs = layers.Input(shape=(None, None, 3))

        # Initialize with ImageNet weights.
        base = tf.keras.applications.DenseNet121(
            include_top=False,
            weights='imagenet',
            input_tensor=inputs
        )

        # Encoder skip connections (standard Keras DenseNet121 layer names).
        enc1 = base.get_layer('conv1_relu').output
        enc2 = base.get_layer('conv2_block6_concat').output
        enc3 = base.get_layer('conv3_block12_concat').output
        enc4 = base.get_layer('conv4_block24_concat').output
        bottleneck = base.get_layer('relu').output

        # Decoder Level 4: upsample the bottleneck and merge with enc4
        # via a 1x1 lateral conv; both IAM inputs start from the same map.
        x = layers.Conv2D(256, 3, padding='same', activation='relu')(bottleneck)
        x = layers.UpSampling2D()(x)
        enc4_lat = layers.Conv2D(256, 1, padding='same')(enc4)

        m4 = layers.Add()([x, enc4_lat])
        nuc4, con4 = IAM_Module(m4, m4, 256)

        # Decoder Level 3
        nuc_up3 = layers.Conv2D(128, 1, padding='same')(layers.UpSampling2D()(nuc4))
        con_up3 = layers.Conv2D(128, 1, padding='same')(layers.UpSampling2D()(con4))
        enc3_lat = layers.Conv2D(128, 1, padding='same')(enc3)

        nuc_m3 = layers.Add()([nuc_up3, enc3_lat])
        con_m3 = layers.Add()([con_up3, enc3_lat])
        nuc3, con3 = IAM_Module(nuc_m3, con_m3, 128)

        # Decoder Level 2
        nuc_up2 = layers.Conv2D(64, 1, padding='same')(layers.UpSampling2D()(nuc3))
        con_up2 = layers.Conv2D(64, 1, padding='same')(layers.UpSampling2D()(con3))
        enc2_lat = layers.Conv2D(64, 1, padding='same')(enc2)

        nuc_m2 = layers.Add()([nuc_up2, enc2_lat])
        con_m2 = layers.Add()([con_up2, enc2_lat])
        nuc2, con2 = IAM_Module(nuc_m2, con_m2, 64)

        # Decoder Level 1
        nuc_up1 = layers.Conv2D(32, 1, padding='same')(layers.UpSampling2D()(nuc2))
        con_up1 = layers.Conv2D(32, 1, padding='same')(layers.UpSampling2D()(con2))
        enc1_lat = layers.Conv2D(32, 1, padding='same')(enc1)

        nuc_m1 = layers.Add()([nuc_up1, enc1_lat])
        con_m1 = layers.Add()([con_up1, enc1_lat])
        nuc1, con1 = IAM_Module(nuc_m1, con_m1, 32)

        # Final upsample back to input resolution and 1x1 sigmoid heads.
        final_nuc = layers.UpSampling2D()(nuc1)
        final_con = layers.UpSampling2D()(con1)

        out_nuc = layers.Conv2D(1, 1, activation='sigmoid', name='nuclei_output')(final_nuc)
        out_con = layers.Conv2D(1, 1, activation='sigmoid', name='contour_output')(final_con)

        return models.Model(inputs=inputs, outputs=[out_nuc, out_con])
requirements.txt CHANGED
@@ -1,3 +1,11 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
 
 
 
 
 
 
1
+ tensorflow>=2.15.0
2
+ numpy>=1.24.3
3
+ pandas>=2.0.3
4
+ opencv-python-headless>=4.8.0
5
+ matplotlib>=3.7.2
6
+ scikit-image>=0.21.0
7
+ scipy>=1.11.1
8
+ streamlit>=1.31.0
9
+ albumentations>=1.3.1
10
+ seaborn>=0.12.2
11
+ requests>=2.31.0
src/streamlit_app.py DELETED
@@ -1,40 +0,0 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
- import streamlit as st
5
-
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ui/__init__.py ADDED
File without changes
ui/dashboard.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ import seaborn as sns
6
+
7
def render_css():
    """Injects custom CSS for a professional medical dashboard optimized for horizontal screens."""
    # NOTE(review): the selectors below target Streamlit internals
    # (data-testid="stMetric", .stTabs, .block-container) which can change
    # between Streamlit releases — re-verify after upgrading Streamlit.
    st.markdown("""
        <style>
        .main {
            max-width: 100% !important;
            padding: 1rem 2rem;
        }
        .block-container {
            max-width: 100% !important;
            padding-left: 2rem !important;
            padding-right: 2rem !important;
        }
        h1, h2, h3 {
            font-family: 'Segoe UI', sans-serif;
            font-weight: 600;
        }

        div[data-testid="stMetric"] {
            padding: 18px;
            border-radius: 12px;
            box-shadow: 0 2px 8px rgba(0,0,0,0.08);
        }

        img {
            border-radius: 10px;
            box-shadow: 0 4px 12px rgba(0,0,0,0.1);
            image-rendering: -webkit-optimize-contrast;
            image-rendering: crisp-edges;
        }

        .report-box {
            padding: 24px;
            border-radius: 12px;
            border-left: 6px solid #dc3545;
            margin-bottom: 20px;
            box-shadow: 0 2px 8px rgba(0,0,0,0.08);
        }

        .stTabs [data-baseweb="tab-list"] {
            gap: 8px;
        }
        .stTabs [data-baseweb="tab"] {
            padding: 12px 24px;
            border-radius: 8px;
        }

        .dataframe {
            font-size: 0.95rem !important;
        }
        </style>
    """, unsafe_allow_html=True)
59
+
60
def render_header(app_name, version):
    """Render the app title, a version caption, and a horizontal divider."""
    caption_text = f"Klinik Karar Destek Sistemi | Sürüm: {version}"
    st.title(app_name)
    st.caption(caption_text)
    st.markdown("---")
64
+
65
def apply_heatmap_overlay(img_rgb, heatmap_float, colormap=cv2.COLORMAP_JET, alpha=0.6):
    """Blend a float heatmap onto an RGB image.

    Normalizes the heatmap to [0, 1] when it has any positive value,
    colorizes it with the given OpenCV colormap (converted BGR -> RGB),
    resizes it to the image size if needed, and alpha-blends it over
    ``img_rgb``. Returns the blended RGB image.
    """
    peak = np.max(heatmap_float)
    if peak > 0:
        heatmap_float = heatmap_float / peak
    colored = cv2.cvtColor(
        cv2.applyColorMap(np.uint8(255 * heatmap_float), colormap),
        cv2.COLOR_BGR2RGB,
    )
    target_hw = img_rgb.shape[:2]
    if colored.shape[:2] != target_hw:
        colored = cv2.resize(colored, (target_hw[1], target_hw[0]))
    return cv2.addWeighted(img_rgb, 1 - alpha, colored, alpha, 0)
77
+
78
def render_classification_panel(img_rgb, diagnosis, cls_conf, seg_conf, gradcam_map):
    """Render the diagnosis card, confidence metrics, original image, and Grad-CAM overlay.

    Args:
        img_rgb: Original image as an RGB array.
        diagnosis: Human-readable diagnosis label; only checked for the
            substring "Benign" to pick the card color.
        cls_conf: Classifier confidence in [0, 1].
        seg_conf: Segmentation confidence in [0, 1].
        gradcam_map: 2D float heatmap blended over the image.
    """
    st.subheader("1. Tanı ve Model Güven Analizi")

    col_diag, col_orig, col_xai = st.columns([1.2, 1.4, 1.4])

    with col_diag:
        # Dynamic styling based on diagnosis: green only for "Benign",
        # red for everything else (including unknown labels).
        color = "#dc3545" if "Benign" not in diagnosis else "#28a745"
        st.markdown(f"""
        <div style="padding: 24px; border-radius: 12px; border-left: 6px solid {color}; box-shadow: 0 4px 12px rgba(0,0,0,0.1);">
            <h3 style="margin:0; color: {color} !important; font-size: 1.8rem;">{diagnosis}</h3>
            <p style="margin-top: 12px;">Yapay Zeka Nihai Kararı</p>
        </div>
        """, unsafe_allow_html=True)

        st.markdown("#### Güvenilirlik Metrikleri")
        c1, c2 = st.columns(2)
        c1.metric("Teşhis Güveni", f"%{cls_conf*100:.1f}", help="ResNet50 modelinin sınıflandırma kesinliği.")
        c2.metric("Segmentasyon Güveni", f"%{seg_conf*100:.1f}", help="CIA-Net modelinin hücre tespit kesinliği (Ortalama Piksel Olasılığı).")

        # Below 70% classifier confidence, ask for a manual review.
        if cls_conf < 0.70:
            st.warning("Düşük güven skoru. Lütfen manuel inceleme yapınız.")

    with col_orig:
        st.image(img_rgb, caption="Orijinal Görüntü", width="stretch")

    with col_xai:
        overlay = apply_heatmap_overlay(img_rgb, gradcam_map, alpha=0.5)
        st.image(overlay, caption="Yapay Zeka Odak Alanları (Grad-CAM)", width="stretch")
107
+
108
def render_segmentation_panel(img_rgb, nuc_map, uncertainty_map, instance_mask, stats):
    """Render the cellular morphology section: segmentation, uncertainty, stats, and plots.

    Args:
        img_rgb: Original RGB image.
        nuc_map: Nucleus probability heatmap (2D float).
        uncertainty_map: Entropy/uncertainty heatmap (2D float).
        instance_mask: Labeled instance mask; pixels > 0 are treated as cells.
        stats: Per-cell DataFrame with columns 'Area', 'Circularity',
            'Solidity'. Assumed all-numeric — TODO confirm, since
            ``.format("{:.2f}")`` below is applied to every column and
            would raise on non-numeric data.
    """
    st.markdown("---")
    st.subheader("2. Hücresel Morfoloji ve Biyolojik Analiz")

    tab_seg, tab_unc, tab_data, tab_plots = st.tabs([
        "Segmentasyon",
        "Belirsizlik (Uncertainty)",
        "Kantitatif Veriler",
        "Dağılım Grafikleri"
    ])

    with tab_seg:
        c1, c2 = st.columns(2)
        with c1:
            nuc_colored = apply_heatmap_overlay(img_rgb, nuc_map, colormap=cv2.COLORMAP_OCEAN, alpha=0.6)
            st.image(nuc_colored, caption="Nükleus Olasılık Haritası (AI Çıktısı)", width="stretch")
        with c2:
            # Paint every detected instance green, then blend with the original.
            mask_rgb = np.zeros_like(img_rgb)
            mask_rgb[instance_mask > 0] = [0, 255, 0]  # Green
            overlay = cv2.addWeighted(img_rgb, 0.7, mask_rgb, 0.3, 0)
            st.image(overlay, caption="Ayrıştırılmış Hücreler (Watershed)", width="stretch")

    with tab_unc:
        c1, c2 = st.columns([1, 2])
        with c1:
            st.info("""
            **Nasıl Okunmalı?**
            * **Siyah/Koyu Alanlar:** Modelin kararından %100 emin olduğu bölgeler.
            * **Parlak/Sarı Alanlar:** Modelin kararsız kaldığı ("Burası hücre mi değil mi?") bölgeler.

            Sarı alanların çokluğu, görüntünün kalitesiz veya dokunun karmaşık olduğunu gösterir.
            """)
        with c2:
            unc_colored = apply_heatmap_overlay(img_rgb, uncertainty_map, colormap=cv2.COLORMAP_INFERNO, alpha=0.7)
            st.image(unc_colored, caption="Model Entropi (Belirsizlik) Haritası", width="stretch")

    with tab_data:
        if not stats.empty:
            # Summary metrics over the per-cell measurements.
            m1, m2, m3, m4 = st.columns(4)
            m1.metric("Toplam Hücre", f"{len(stats)}")
            m2.metric("Ort. Alan", f"{stats['Area'].mean():.1f} px")
            m3.metric("Düzensizlik", f"{1 - stats['Circularity'].mean():.2f}", help="0'a yaklaştıkça hücreler daha yuvarlak (sağlıklı) demektir.")
            m4.metric("Varyasyon", f"{stats['Area'].std():.1f}", help="Yüksek varyasyon (Anizonükleoz) kanser belirtisi olabilir.")

            st.dataframe(
                stats.style.background_gradient(cmap='Reds', subset=['Area'])
                .format("{:.2f}"),
                width="stretch"
            )
        else:
            st.warning("Hücre tespit edilemedi.")

    with tab_plots:
        if not stats.empty:
            # High-DPI figure defaults for crisp rendering in the browser.
            plt.style.use('seaborn-v0_8-whitegrid')
            sns.set_context("notebook", font_scale=1.3)
            sns.set_palette("husl")

            c1, c2 = st.columns(2)
            with c1:
                # Cell-size distribution histogram with a KDE overlay.
                fig, ax = plt.subplots(figsize=(10, 6), dpi=150)
                sns.histplot(stats['Area'], kde=True, ax=ax, color='#3498db', fill=True, alpha=0.7, linewidth=2)
                ax.set_title("Hücre Boyut Dağılımı (Histogram)", fontsize=16, fontweight='bold', pad=20)
                ax.set_xlabel("Alan (Piksel)", fontsize=13, fontweight='600')
                ax.set_ylabel("Frekans", fontsize=13, fontweight='600')
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.grid(True, alpha=0.3, linestyle='--')
                plt.tight_layout()
                st.pyplot(fig)
                # Close the figure to avoid matplotlib memory build-up
                # across Streamlit reruns.
                plt.close()

            with c2:
                # Area vs. circularity scatter, colored by solidity.
                fig, ax = plt.subplots(figsize=(10, 6), dpi=150)
                scatter = sns.scatterplot(data=stats, x='Area', y='Circularity', hue='Solidity',
                                          ax=ax, palette='viridis', s=100, alpha=0.8, edgecolor='white', linewidth=1.5)
                ax.set_title("Boyut vs. Şekil Düzensizliği", fontsize=16, fontweight='bold', pad=20)
                ax.set_xlabel("Alan (Piksel)", fontsize=13, fontweight='600')
                ax.set_ylabel("Dairesellik", fontsize=13, fontweight='600')
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.grid(True, alpha=0.3, linestyle='--')
                ax.legend(title='Solidity', title_fontsize=11, fontsize=10, loc='best', frameon=True,
                          fancybox=True, shadow=True, framealpha=0.95)
                plt.tight_layout()
                st.pyplot(fig)
                plt.close()