saad1BM commited on
Commit
2cc48b9
·
verified ·
1 Parent(s): 379bb22

Upload 4 files

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. app.py +140 -0
  3. brain_tumor_model.keras +3 -0
  4. requirements.txt +166 -0
  5. train_model.py +83 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ brain_tumor_model.keras filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import shutil # <-- Nayi library for file copy (Fixes cross-device link error)
3
+ import tensorflow as tf
4
+ from tensorflow.keras.models import load_model
5
+ import numpy as np
6
+ from PIL import Image
7
+ from huggingface_hub import hf_hub_download
8
+ import os
9
+
10
# --- 1. Configuration (Constants) ---
# Hugging Face Hub repository that hosts the trained model file.
HF_REPO_ID = "saad1BM/brain-tumor-detection"
MODEL_FILENAME = "brain_tumor_model.keras"
# Local path the model is copied to after download (same as the filename,
# i.e. the app's working directory).
MODEL_PATH = MODEL_FILENAME

# Input size the model expects (VGG16-style 224x224 RGB).
IMAGE_SIZE = (224, 224)
# NOTE(review): this order must match the training generator's class
# indices (flow_from_directory sorts class folders alphabetically) —
# it does for these four folder names.
CLASS_NAMES = ['glioma_tumor', 'meningioma_tumor', 'no_tumor', 'pituitary_tumor']
18
+
19
+
20
# --- 2. Model Loading (download from the Hub, copy locally, load) ---
@st.cache_resource
def load_trained_model():
    """Download the trained model from Hugging Face (if needed) and load it.

    Returns:
        The loaded ``keras`` model, or ``None`` when download/loading fails
        (the error is surfaced in the UI and callers degrade gracefully).
    """
    # FIX: removed the pointless `global HF_REPO_ID, MODEL_FILENAME,
    # MODEL_PATH` statement — the names are only read, never assigned,
    # so `global` had no effect and wrongly implied mutation.

    # Minimum plausible model size (~100 MB). A smaller file on disk is
    # treated as a corrupt/incomplete earlier download and re-fetched.
    min_size_bytes = 100 * 1024 * 1024

    try:
        # Re-download when the model is missing or looks truncated.
        if not os.path.exists(MODEL_PATH) or os.path.getsize(MODEL_PATH) < min_size_bytes:
            st.info(f"Downloading model from Hugging Face: {HF_REPO_ID}/{MODEL_FILENAME}")

            # hf_hub_download places the file under the given cache dir;
            # /tmp is writable on hosted Streamlit runtimes.
            downloaded_path = hf_hub_download(
                repo_id=HF_REPO_ID,
                filename=MODEL_FILENAME,
                cache_dir="/tmp/hf_cache",
            )

            st.info("Copying model file locally to avoid cross-device link error...")

            # shutil.copyfile instead of os.rename: /tmp may be a different
            # filesystem, where rename fails with a cross-device link error.
            shutil.copyfile(downloaded_path, MODEL_PATH)

            st.success("Model download and copy successful!")

        model = load_model(MODEL_PATH)
        return model

    except Exception as e:
        # Top-level boundary: report in the UI rather than crash the app.
        st.error(f"Model Load Failed. Ensure 'huggingface-hub' is in requirements.txt and file is public. Error: {e}")
        return None

model = load_trained_model()
61
+
62
# --- 3. Prediction Function ---
def predict_image(image_file, model):
    """Classify an uploaded MRI image.

    Returns a ``(class_name, confidence_percent)`` tuple. When the model
    failed to load, returns ``("Model Load Failed", 0.0)`` instead.
    """
    if model is None:
        return "Model Load Failed", 0.0

    # Decode the upload and match the model's expected input: RGB, 224x224.
    mri = Image.open(image_file).convert("RGB").resize(IMAGE_SIZE)

    # (224, 224, 3) -> (1, 224, 224, 3), scaled by 1/255 exactly as during
    # training.
    batch = np.expand_dims(np.array(mri), axis=0) / 255.0

    predictions = model.predict(batch)

    # Highest-probability class and its score (as a percentage).
    top_index = np.argmax(predictions, axis=1)[0]
    confidence = np.max(predictions) * 100

    return CLASS_NAMES[top_index], confidence
90
+
91
# --- 4. Streamlit UI ---

st.set_page_config(page_title="Brain Tumor Detection", layout="wide")

# a) Page title and intro.
st.title("🧠 Brain Tumor Detection System (AI Powered)")
st.write("Upload an MRI image below to classify it as one of the tumor types or no tumor.")
st.markdown("---")

col1, col2 = st.columns(2)

with col1:
    # b) Image upload section.
    uploaded_file = st.file_uploader("Upload MRI Image:", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # Preview of the uploaded scan.
        st.image(uploaded_file, caption="Uploaded MRI Image", use_column_width=True)
        st.markdown("---")

        # c) Prediction button.
        if st.button("Detect Tumor"):
            # FIX: st.spinner must be used as a context manager — the
            # original bare call never displayed a spinner.
            with st.spinner("Analyzing image and detecting tumor..."):
                predicted_class, confidence_score = predict_image(uploaded_file, model)

            # d) Output section: collapse the four classes into a simple
            # Tumor / No Tumor headline for display.
            if predicted_class == 'no_tumor':
                result_label = f"🟢 **Prediction: No Tumor**"
            elif predicted_class == 'Model Load Failed':
                result_label = f"❌ **Prediction: Model Initialization Error**"
                st.error("Model could not be loaded for prediction. Please check logs for Hugging Face download errors.")
            else:
                result_label = f"🔴 **Prediction: Tumor ({predicted_class.replace('_', ' ').title()})**"

            st.success("✅ Analysis Complete")
            st.subheader(result_label)
            st.metric(label="Confidence Score", value=f"{confidence_score:.2f}%")

    st.write("---")

with col2:
    st.header("Results and Interpretation")
    st.info("The system uses Transfer Learning (VGG16) to classify the image into four categories: Glioma, Meningioma, Pituitary, or No Tumor.")

    if uploaded_file is None:
        st.warning("Please upload an image and click 'Detect Tumor' to see the results.")
brain_tumor_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d20e5a0cf49890f3b06898737a2f0a84dc9febfe70a184da563c967d7846cbf5
3
+ size 136037810
requirements.txt ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.3.1
2
+ aiohappyeyeballs==2.6.1
3
+ aiohttp==3.13.2
4
+ aiosignal==1.4.0
5
+ altair==6.0.0
6
+ annotated-types==0.7.0
7
+ anyio==4.12.0
8
+ astunparse==1.6.3
9
+ async-timeout==4.0.3
10
+ attrs==25.4.0
11
+ backoff==2.2.1
12
+ bcrypt==5.0.0
13
+ blinker==1.9.0
14
+ build==1.3.0
15
+ cachetools==6.2.2
16
+ certifi==2025.11.12
17
+ charset-normalizer==3.4.4
18
+ chromadb==1.3.6
19
+ click==8.3.1
20
+ colorama==0.4.6
21
+ coloredlogs==15.0.1
22
+ dataclasses-json==0.6.7
23
+ distro==1.9.0
24
+ durationpy==0.10
25
+ exceptiongroup==1.3.1
26
+ filelock==3.20.0
27
+ flatbuffers==25.9.23
28
+ frozenlist==1.8.0
29
+ fsspec==2025.12.0
30
+ gast==0.7.0
31
+ gitdb==4.0.12
32
+ GitPython==3.1.45
33
+ google-auth==2.43.0
34
+ google-pasta==0.2.0
35
+ googleapis-common-protos==1.72.0
36
+ greenlet==3.3.0
37
+ grpcio==1.76.0
38
+ h11==0.16.0
39
+ h5py==3.15.1
40
+ hf-xet==1.2.0
41
+ httpcore==1.0.9
42
+ httptools==0.7.1
43
+ httpx==0.28.1
44
+ httpx-sse==0.4.3
45
+ huggingface_hub==1.2.2
46
+ humanfriendly==10.0
47
+ idna==3.11
48
+ importlib_metadata==8.7.0
49
+ importlib_resources==6.5.2
50
+ Jinja2==3.1.6
51
+ jiter==0.12.0
52
+ jsonpatch==1.33
53
+ jsonpointer==3.0.0
54
+ jsonschema==4.25.1
55
+ jsonschema-specifications==2025.9.1
56
+ kagglehub==0.3.13
57
+ keras==3.12.0
58
+ kubernetes==34.1.0
59
+ langchain==1.1.3
60
+ langchain-classic==1.0.0
61
+ langchain-community==0.4.1
62
+ langchain-core==1.1.3
63
+ langchain-openai==1.1.1
64
+ langchain-text-splitters==1.0.0
65
+ langgraph==1.0.4
66
+ langgraph-checkpoint==3.0.1
67
+ langgraph-prebuilt==1.0.5
68
+ langgraph-sdk==0.2.15
69
+ langsmith==0.4.59
70
+ libclang==18.1.1
71
+ Markdown==3.10
72
+ markdown-it-py==4.0.0
73
+ MarkupSafe==3.0.3
74
+ marshmallow==3.26.1
75
+ mdurl==0.1.2
76
+ ml_dtypes==0.5.4
77
+ mmh3==5.2.0
78
+ mpmath==1.3.0
79
+ multidict==6.7.0
80
+ mypy_extensions==1.1.0
81
+ namex==0.1.0
82
+ narwhals==2.13.0
83
+ numpy==2.2.6
84
+ oauthlib==3.3.1
85
+ onnxruntime==1.23.2
86
+ openai==2.9.0
87
+ opentelemetry-api==1.39.0
88
+ opentelemetry-exporter-otlp-proto-common==1.39.0
89
+ opentelemetry-exporter-otlp-proto-grpc==1.39.0
90
+ opentelemetry-proto==1.39.0
91
+ opentelemetry-sdk==1.39.0
92
+ opentelemetry-semantic-conventions==0.60b0
93
+ opt_einsum==3.4.0
94
+ optree==0.18.0
95
+ orjson==3.11.5
96
+ ormsgpack==1.12.0
97
+ overrides==7.7.0
98
+ packaging==25.0
99
+ pandas==2.3.3
100
+ pillow==12.0.0
101
+ posthog==5.4.0
102
+ propcache==0.4.1
103
+ protobuf==6.33.2
104
+ pyarrow==22.0.0
105
+ pyasn1==0.6.1
106
+ pyasn1_modules==0.4.2
107
+ pybase64==1.4.3
108
+ pydantic==2.12.5
109
+ pydantic-settings==2.12.0
110
+ pydantic_core==2.41.5
111
+ pydeck==0.9.1
112
+ Pygments==2.19.2
113
+ pypdf==6.4.1
114
+ PyPika==0.48.9
115
+ pyproject_hooks==1.2.0
116
+ pyreadline3==3.5.4
117
+ python-dateutil==2.9.0.post0
118
+ python-dotenv==1.2.1
119
+ pytz==2025.2
120
+ PyYAML==6.0.3
121
+ referencing==0.37.0
122
+ regex==2025.11.3
123
+ requests==2.32.5
124
+ requests-oauthlib==2.0.0
125
+ requests-toolbelt==1.0.0
126
+ # huggingface-hub is already pinned above as huggingface_hub==1.2.2 (same normalized package; unpinned duplicate removed)
127
+ rich==14.2.0
128
+ rpds-py==0.30.0
129
+ rsa==4.9.1
130
+ shellingham==1.5.4
131
+ six==1.17.0
132
+ smmap==5.0.2
133
+ sniffio==1.3.1
134
+ SQLAlchemy==2.0.45
135
+ streamlit==1.52.1
136
+ sympy==1.14.0
137
+ tenacity==9.1.2
138
+ tensorboard==2.20.0
139
+ tensorboard-data-server==0.7.2
140
+ tensorflow==2.20.0
141
+ termcolor==3.2.0
142
+ tiktoken==0.12.0
143
+ tokenizers==0.22.1
144
+ toml==0.10.2
145
+ tomli==2.3.0
146
+ tornado==6.5.3
147
+ tqdm==4.67.1
148
+ typer==0.20.0
149
+ typer-slim==0.20.0
150
+ typing-inspect==0.9.0
151
+ typing-inspection==0.4.2
152
+ typing_extensions==4.15.0
153
+ tzdata==2025.2
154
+ urllib3==2.3.0
155
+ uuid_utils==0.12.0
156
+ uvicorn==0.38.0
157
+ watchdog==6.0.0
158
+ watchfiles==1.1.1
159
+ websocket-client==1.9.0
160
+ websockets==15.0.1
161
+ Werkzeug==3.1.4
162
+ wrapt==2.0.1
163
+ xxhash==3.6.0
164
+ yarl==1.22.0
165
+ zipp==3.23.0
166
+ zstandard==0.25.0
train_model.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
3
+ from tensorflow.keras.applications import VGG16
4
+ from tensorflow.keras.models import Sequential
5
+ from tensorflow.keras.layers import Dense, Flatten, Dropout
6
+ import os
7
+
8
+ # --- 1. Configuration ---
9
+ # Aapke folder structure ke hisaab se path
10
+ DATA_DIR = 'dataset/Training'
11
+ IMAGE_SIZE = (224, 224) # VGG16 ke liye standard input size
12
+ BATCH_SIZE = 32
13
+ NUM_CLASSES = 4 # gliomat_tumor, meningioma_tumor, no_tumor, pituitary_tumor
14
+
15
+ # --- 2. Data Preprocessing aur Augmentation ---
16
+ # Yahan hum images ko normalize (0 se 1 ke beech) karenge.
17
+ # Aur validation data ke liye, training data mein se hi kuch images nikalenge.
18
+ train_datagen = ImageDataGenerator(
19
+ rescale=1./255,
20
+ validation_split=0.2, # 20% data validation ke liye use hoga
21
+ # Aap data augmentation bhi add kar sakte hain (e.g., rotation, zoom)
22
+ )
23
+
24
+ # Training Data Generator (80% images)
25
+ train_generator = train_datagen.flow_from_directory(
26
+ DATA_DIR,
27
+ target_size=IMAGE_SIZE,
28
+ batch_size=BATCH_SIZE,
29
+ class_mode='categorical', # 4 classes ke liye 'categorical' use karte hain
30
+ subset='training'
31
+ )
32
+
33
+ # Validation Data Generator (20% images)
34
+ validation_generator = train_datagen.flow_from_directory(
35
+ DATA_DIR,
36
+ target_size=IMAGE_SIZE,
37
+ batch_size=BATCH_SIZE,
38
+ class_mode='categorical',
39
+ subset='validation'
40
+ )
41
+
42
+ # --- 3. Transfer Learning Model Setup (VGG16) ---
43
+
44
+ # Base model ko load karein. include_top=False ka matlab hai ki final classification layers nahi leni.
45
+ base_model = VGG16(weights='imagenet',
46
+ include_top=False,
47
+ input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
48
+
49
+ # Base layers ko freeze karein taaki unke weights change na hon.
50
+ for layer in base_model.layers:
51
+ layer.trainable = False
52
+
53
+ # --- 4. Custom Classification Layers Add Karein ---
54
+ model = Sequential([
55
+ base_model,
56
+ Flatten(), # 2D output ko 1D vector mein badalna
57
+ Dense(256, activation='relu'), # Pehli hidden layer
58
+ Dropout(0.5), # Overfitting se bachne ke liye
59
+ Dense(NUM_CLASSES, activation='softmax') # Output layer (4 classes)
60
+ ])
61
+
62
+ # Model compile karein
63
+ model.compile(optimizer='adam',
64
+ loss='categorical_crossentropy',
65
+ metrics=['accuracy'])
66
+
67
+ model.summary()
68
+
69
+ # --- 5. Model Training ---
70
+ print("\n--- Starting Model Training ---")
71
+
72
+ # Aapko epochs ki value badhaani pad sakti hai (e.g., 20 ya 30) acchi accuracy ke liye.
73
+ history = model.fit(
74
+ train_generator,
75
+ epochs=10,
76
+ validation_data=validation_generator
77
+ )
78
+
79
+ # --- 6. Model Save Karein ---
80
+ # Ab is trained model ko save kar dein taki Streamlit use kar sake
81
+ MODEL_FILENAME = 'brain_tumor_model.keras'
82
+ model.save(MODEL_FILENAME)
83
+ print(f"\nModel successfully saved as: {MODEL_FILENAME}")