GarimaSharma75 committed on
Commit
a1ad22e
Β·
verified Β·
1 Parent(s): d13e337

Upload 11 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ leukemia_streamlit/saved_leukemia_ensemble/CustomCNN_model.keras filter=lfs diff=lfs merge=lfs -text
37
+ leukemia_streamlit/saved_leukemia_ensemble/DenseNet121_model.keras filter=lfs diff=lfs merge=lfs -text
38
+ leukemia_streamlit/saved_leukemia_ensemble/MobileNetV2_model.keras filter=lfs diff=lfs merge=lfs -text
39
+ leukemia_streamlit/saved_leukemia_ensemble/VGG16_model.keras filter=lfs diff=lfs merge=lfs -text
leukemia_streamlit/.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *.keras filter=lfs diff=lfs merge=lfs -text
2
+ *.pkl filter=lfs diff=lfs merge=lfs -text
leukemia_streamlit/app.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
3
+
4
+ import streamlit as st
5
+ import numpy as np
6
+ import os
7
+ import pickle
8
+ import zipfile
9
+ import matplotlib.pyplot as plt
10
+ from tensorflow.keras.models import load_model
11
+ from tensorflow.keras.preprocessing import image
12
+
13
+ # --- CONFIG ---
14
+ st.set_page_config(page_title="Leukemia Subtype Detector", layout="centered")
15
+
16
+ st.markdown("""
17
+ <div style='background-color:#57068c;padding:20px;border-radius:10px'>
18
+ <h2 style='color:white;text-align:center'>🧬 Leukemia Subtype Detection</h2>
19
+ <p style='color:white;text-align:center'>
20
+ Uses an ensemble of 4 models: <b>DenseNet121</b>, <b>MobileNetV2</b>, <b>VGG16</b>, and <b>Custom CNN</b>.<br>
21
+ Predicts: <i>Benign</i>, <i>Pre</i>, <i>Pro</i>, <i>Early</i>.
22
+ </p>
23
+ </div>
24
+ """, unsafe_allow_html=True)
25
+
26
# --- CONSTANTS ---
# Input spatial size expected by all four models.
IMG_HEIGHT, IMG_WIDTH = 224, 224
# Class index -> label mapping; assumed to match the training class order —
# TODO confirm (streamlit.py in this repo uses different labels/order).
CLASS_NAMES = ['Benign', 'Pre', 'Pro', 'Early']
# Directory holding the model archives, extracted .keras files, and history pickles.
SAVE_DIR = 'saved_leukemia_ensemble'
# Model name -> zip archive file name (extracted on demand to a .keras file).
MODEL_ZIPS = {
    "DenseNet121": "DenseNet121_model.zip",
    "MobileNetV2": "MobileNetV2_model.zip",
    "VGG16": "VGG16_model.zip",
    "CustomCNN": "CustomCNN_model.zip"
}
# Per-model weights for the weighted-average ensemble (these sum to 1.0).
ENSEMBLE_WEIGHTS = {
    "DenseNet121": 0.28,
    "MobileNetV2": 0.30,
    "VGG16": 0.22,
    "CustomCNN": 0.20
}
# Model name -> path of its pickled Keras training history.
HISTORY_PATHS = {
    name: os.path.join(SAVE_DIR, f"{name}_history.pkl") for name in MODEL_ZIPS
}
45
+
46
# --- UTIL FUNCTION ---
def extract_model_if_needed(zip_path, output_path):
    """Extract ``zip_path`` into the directory of ``output_path`` unless the
    target file already exists.

    Fix: the original opened the archive unconditionally and raised
    FileNotFoundError when it was missing, even though the repository ships
    pre-extracted ``.keras`` files (no zips). Now a missing archive is a
    no-op and the caller decides how to report a missing model.
    """
    if os.path.exists(output_path):
        return  # already extracted (or shipped directly)
    if not os.path.exists(zip_path):
        return  # no archive to extract; caller handles the missing model
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(os.path.dirname(output_path))
51
+
52
# --- LOAD MODELS ---
@st.cache_resource
def load_all_models():
    """Extract (if needed) and load every ensemble member, keyed by name.

    Cached by Streamlit so the models are loaded once per server process.
    Models whose .keras file cannot be found are skipped with a warning.
    """
    loaded = {}
    for model_name, archive_name in MODEL_ZIPS.items():
        archive_path = os.path.join(SAVE_DIR, archive_name)
        weights_path = archive_path.replace(".zip", ".keras")

        extract_model_if_needed(archive_path, weights_path)

        if not os.path.exists(weights_path):
            st.warning(f"❌ Model not found: {weights_path}")
            continue
        loaded[model_name] = load_model(weights_path, compile=False)
    return loaded
67
+
68
# Load (and cache) the ensemble once at startup.
models = load_all_models()

# --- UPLOAD IMAGE ---
uploaded_file = st.file_uploader("πŸ“ Upload a blood smear image", type=["jpg", "jpeg", "png"])

if uploaded_file:
    st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)

    if st.button("πŸ” Enter"):
        with st.spinner("⏳ Please wait while results are being computed..."):
            try:
                # Resize to the models' input size and scale pixels to [0, 1].
                img = image.load_img(uploaded_file, target_size=(IMG_HEIGHT, IMG_WIDTH))
                img_array = image.img_to_array(img) / 255.0
                img_array = np.expand_dims(img_array, axis=0)  # add batch dimension

                st.markdown("### πŸ§ͺ Model Predictions:")
                col1, col2 = st.columns(2)
                individual_preds = {}

                # Show each member's prediction, alternating between two columns.
                for i, (name, model) in enumerate(models.items()):
                    pred = model.predict(img_array)  # probabilities, shape (1, num_classes)
                    individual_preds[name] = pred
                    cls = CLASS_NAMES[np.argmax(pred)]
                    conf = pred[0][np.argmax(pred)]
                    with [col1, col2][i % 2]:
                        st.info(f"**{name}** ➜ `{cls}` ({conf:.2%})")

                # Weighted average of the per-model probability vectors.
                ensemble_pred = sum(ENSEMBLE_WEIGHTS[name] * pred for name, pred in individual_preds.items())
                final_class = CLASS_NAMES[np.argmax(ensemble_pred)]
                final_conf = float(np.max(ensemble_pred))

                st.markdown(f"""
<div style="background-color:#c6f6d5;padding:15px;border-radius:10px">
<h4 style="color:#2f855a">βœ… Ensemble Prediction: <b>{final_class}</b></h4>
<p style="font-size:16px;color:#22543d">Confidence: <b>{final_conf:.2%}</b></p>
</div>
""", unsafe_allow_html=True)

                st.bar_chart({CLASS_NAMES[i]: float(ensemble_pred[0][i]) for i in range(4)})

            except Exception as e:
                # Broad catch is deliberate: surface any failure (bad image,
                # model error) in the UI rather than crashing the app.
                st.error(f"⚠️ Error: {e}")
    else:
        st.info("πŸ‘ˆ Please upload an image to begin.")
112
+
113
# --- OPTIONAL TRAINING VISUALIZATION ---
st.markdown("---")
st.subheader("πŸ“ˆ Model Training History")

if st.checkbox("Show training curves"):
    for name, path in HISTORY_PATHS.items():
        if os.path.exists(path):
            with open(path, "rb") as f:
                # NOTE(review): pickle.load on a repo-shipped file; assumed
                # trusted — never point this at untrusted input.
                hist = pickle.load(f)
            # Keys follow Keras History.history conventions — TODO confirm
            # the pickles store that dict directly.
            acc = hist['accuracy']
            val_acc = hist['val_accuracy']
            loss = hist['loss']
            val_loss = hist['val_loss']

            # Side-by-side accuracy / loss curves for this model.
            fig, ax = plt.subplots(1, 2, figsize=(12, 4))
            ax[0].plot(acc, label='Train Acc')
            ax[0].plot(val_acc, label='Val Acc')
            ax[0].set_title(f'{name} Accuracy')
            ax[0].legend()

            ax[1].plot(loss, label='Train Loss')
            ax[1].plot(val_loss, label='Val Loss')
            ax[1].set_title(f'{name} Loss')
            ax[1].legend()

            st.pyplot(fig)
        else:
            st.warning(f"No training history found for {name}")
leukemia_streamlit/saved_leukemia_ensemble/CustomCNN_history.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6748fd7226aa4a81dde727f88fec29d61850ca6b445202a3bfcf6b4918e36f39
3
+ size 996
leukemia_streamlit/saved_leukemia_ensemble/CustomCNN_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67e1de995fef5c6dbfd630d1b86726263eb6ca8d2e593129cc5e4c664c3ab25b
3
+ size 309466994
leukemia_streamlit/saved_leukemia_ensemble/DenseNet121_history.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5957e0c87de6deb1dcdd62703c84515cb807b37d5530ddaaa14ef121d5a9b128
3
+ size 996
leukemia_streamlit/saved_leukemia_ensemble/DenseNet121_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:145449086ddc01f1d1262c3e743be985a80ed2a2e58170f97be5c26d903a9769
3
+ size 29708210
leukemia_streamlit/saved_leukemia_ensemble/MobileNetV2_history.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f3fb78ebd25d43e9504a7c53ef2ddcf5653e1056ceffce1f687255bf4670041
3
+ size 996
leukemia_streamlit/saved_leukemia_ensemble/MobileNetV2_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0dc9b4510775f9d4f7f6a1a1b70f42db3c5ea1e4730decc3ce07b0dcb2b2b18
3
+ size 9674100
leukemia_streamlit/saved_leukemia_ensemble/VGG16_history.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bf4705734735de96652202338d018fe7b9186d11ca60eec9a18f9a6b17959d1
3
+ size 996
leukemia_streamlit/saved_leukemia_ensemble/VGG16_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8298c14acb7a5a6e81904417c0a603d6ee0d27ee030781fe067410d03a4f39fa
3
+ size 58967202
leukemia_streamlit/streamlit.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import os
import requests
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
from PIL import Image

# πŸ”§ Configure the Streamlit page
st.set_page_config(page_title="Leukemia Subtype Detection", layout="centered")
# Title and subtitle rendered as raw HTML.
st.markdown("<h1 style='text-align: center; color: #d6336c;'>πŸŽ—οΈ Leukemia Subtype Detection</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size:18px;'>Upload a blood smear image and detect the leukemia subtype using deep learning models.</p>", unsafe_allow_html=True)
13
+
14
# πŸ”— Hugging Face Base URL (confirmed valid)
HF_BASE = "https://huggingface.co/GarimaSharma75/Leukemia_Subtype/resolve/main/"
# Local cache directory for downloaded model files.
SAVE_DIR = "models"

# πŸ“Œ Class labels
# NOTE(review): index order must match the models' output layer. app.py in
# this repo uses ['Benign', 'Pre', 'Pro', 'Early'] for the same model files —
# confirm which naming/ordering is correct.
CLASS_NAMES = ["Early Pre-B ALL", "Pre-B ALL", "Pro-B ALL", "Healthy"]

# πŸ”’ Model files hosted on Hugging Face
model_files = {
    "DenseNet121": "DenseNet121_model.keras",
    "MobileNetV2": "MobileNetV2_model.keras",
    "VGG16": "VGG16_model.keras",
    "CustomCNN": "CustomCNN_model.keras"
}
28
+
29
# πŸ“₯ Download the model file from Hugging Face if it's not saved locally
def download_if_not_exists(filename):
    """Return the local path of ``filename``, downloading it from the
    Hugging Face repo on first use.

    Stops the Streamlit script with an error message if the download fails.
    Fix: both user-facing messages previously contained a literal
    "(unknown)" placeholder instead of the actual file name.
    """
    os.makedirs(SAVE_DIR, exist_ok=True)
    filepath = os.path.join(SAVE_DIR, filename)
    if not os.path.exists(filepath):
        url = HF_BASE + filename
        with st.spinner(f"πŸ“₯ Downloading `{filename}`..."):
            response = requests.get(url)
            if response.status_code == 200:
                with open(filepath, "wb") as f:
                    f.write(response.content)
            else:
                st.error(f"❌ Failed to download {filename} from Hugging Face.")
                st.stop()
    return filepath
44
+
45
# 🧼 Image preprocessing
def preprocess(img):
    """Resize *img* to 224x224, scale pixel values to [0, 1], and prepend a
    batch axis, returning an array of shape (1, 224, 224, channels)."""
    resized = img.resize((224, 224))
    scaled = np.array(resized) / 255.0
    return scaled[np.newaxis, ...]
50
+
51
+ # πŸ“ Sidebar: Model selection and info
52
+ with st.sidebar:
53
+ st.markdown("## 🧠 Select Model")
54
+ selected_model = st.selectbox("Choose one model to run", list(model_files.keys()))
55
+ st.markdown("### ℹ️ About the Models")
56
+ st.info("""
57
+ β€’ **DenseNet121** – Deep CNN with dense connections
58
+ β€’ **MobileNetV2** – Lightweight CNN
59
+ β€’ **VGG16** – Classic 16-layer CNN
60
+ β€’ **CustomCNN** – Custom-built architecture
61
+ """)
62
+ st.markdown("---")
63
+ st.markdown("Made by **Garima Sharma** πŸ’–")
64
+
65
+ # πŸ“€ Upload image
66
+ uploaded_file = st.file_uploader("πŸ“€ Upload a blood smear image (JPG/PNG)", type=["jpg", "jpeg", "png"])
67
+
68
+ if uploaded_file:
69
+ img = Image.open(uploaded_file).convert("RGB")
70
+ st.image(img, caption="Uploaded Image", use_container_width=True)
71
+
72
+ if st.button("πŸ” Run Detection"):
73
+ with st.spinner("⏳ Please wait while the model is downloading and predicting..."):
74
+ input_data = preprocess(img)
75
+ model_path = download_if_not_exists(model_files[selected_model])
76
+ model = load_model(model_path)
77
+ preds = model.predict(input_data)
78
+ pred_class = CLASS_NAMES[np.argmax(preds)]
79
+ prob = np.max(preds) * 100
80
+
81
+ st.success(f"βœ… **{selected_model}** predicts: **{pred_class}** with `{prob:.2f}%` confidence")
82
+ else:
83
+ st.warning("πŸ“Ž Please upload an image to get started.")