saad1BM commited on
Commit
8aceb0f
·
verified ·
1 Parent(s): b250029

Upload 4 files

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. app.py +136 -0
  3. brain_tumor_model.keras +3 -0
  4. requirements.txt +166 -0
  5. train_model.py +73 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ brain_tumor_model.keras filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import shutil
3
+ import tensorflow as tf
4
+ from tensorflow.keras.models import load_model
5
+ import numpy as np
6
+ from PIL import Image
7
+ from huggingface_hub import hf_hub_download
8
+ import os
9
+
10
# Hugging Face Hub location of the trained model artifact.
HF_REPO_ID = "saad1BM/brain-tumor-detection"
MODEL_FILENAME = "brain_tumor_model.keras"
# Local path the model is copied to after download (current working directory).
MODEL_PATH = MODEL_FILENAME

# Input size the network was trained with (VGG16 default; see train_model.py).
IMAGE_SIZE = (224, 224)
# NOTE(review): order is assumed to match the alphabetical class-folder order
# used by the training generator — verify against the dataset layout.
CLASS_NAMES = ['glioma_tumor', 'meningioma_tumor', 'no_tumor', 'pituitary_tumor']
16
+
17
+
18
@st.cache_resource
def load_trained_model():
    """Download the trained model from Hugging Face (if needed) and load it.

    The artifact is fetched into a temporary cache directory and then copied
    next to the app (copy, not move, to avoid cross-device link errors),
    before being loaded with Keras.

    Returns:
        The loaded Keras model, or None (with a visible st.error) on failure.
    """
    # A real model file is well over 100 MB (see the LFS pointer: ~136 MB);
    # anything smaller is a stale LFS pointer or partial download and
    # triggers a (re-)download.
    min_size_bytes = 100 * 1024 * 1024

    try:
        if not os.path.exists(MODEL_PATH) or os.path.getsize(MODEL_PATH) < min_size_bytes:
            st.info(f"Downloading model from Hugging Face: {HF_REPO_ID}/{MODEL_FILENAME}")

            downloaded_path = hf_hub_download(
                repo_id=HF_REPO_ID,
                filename=MODEL_FILENAME,
                cache_dir="/tmp/hf_cache",
            )

            st.info("Copying model file locally to avoid cross-device link error...")
            # copyfile works across filesystems, unlike a rename/move.
            shutil.copyfile(downloaded_path, MODEL_PATH)
            st.success("Model download and copy successful!")

        model = load_model(MODEL_PATH)
        return model

    except Exception as e:
        # Broad catch is deliberate: any failure (network, auth, corrupt
        # file) should surface in the UI rather than crash the script rerun.
        st.error(f"Model Load Failed. Ensure 'huggingface-hub' is in requirements.txt and file is public. Error: {e}")
        return None
55
+
56
# Load the model once per server process (memoized by @st.cache_resource).
model = load_trained_model()
57
+
58
+
59
def predict_image(image_file, model):
    """Classify an uploaded MRI image.

    Returns a (class_name, confidence_percent) pair, or
    ("Model Load Failed", 0.0) when no model is available.
    """
    if model is None:
        return "Model Load Failed", 0.0

    # Preprocess exactly as during training: RGB, 224x224, values in [0, 1].
    pil_img = Image.open(image_file).convert("RGB").resize(IMAGE_SIZE)
    batch = np.expand_dims(np.array(pil_img), axis=0) / 255.0

    # Single-image batch -> softmax scores over the four classes.
    scores = model.predict(batch)
    winner = np.argmax(scores, axis=1)[0]
    confidence = np.max(scores) * 100

    return CLASS_NAMES[winner], confidence
86
+
87
+
88
+
89
# --- Streamlit page layout and interaction -------------------------------
st.set_page_config(page_title="Brain Tumor Detection", layout="wide")

st.title("🧠 Brain Tumor Detection System (AI Powered)")
st.write("Upload an MRI image below to classify it as one of the tumor types or no tumor.")
st.markdown("---")

col1, col2 = st.columns(2)

with col1:
    uploaded_file = st.file_uploader("Upload MRI Image:", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # use_container_width replaces the deprecated use_column_width.
        st.image(uploaded_file, caption="Uploaded MRI Image", use_container_width=True)
        st.markdown("---")

        if st.button("Detect Tumor"):
            # Bug fix: st.spinner must be used as a context manager; a bare
            # call creates and immediately discards it, so nothing is shown.
            with st.spinner("Analyzing image and detecting tumor..."):
                predicted_class, confidence_score = predict_image(uploaded_file, model)

            if predicted_class == 'no_tumor':
                result_label = "🟢 **Prediction: No Tumor**"
            elif predicted_class == 'Model Load Failed':
                result_label = "❌ **Prediction: Model Initialization Error**"
                st.error("Model could not be loaded for prediction. Please check logs for Hugging Face download errors.")
            else:
                result_label = f"🔴 **Prediction: Tumor ({predicted_class.replace('_', ' ').title()})**"

            st.success("✅ Analysis Complete")
            st.subheader(result_label)
            st.metric(label="Confidence Score", value=f"{confidence_score:.2f}%")

    st.write("---")

with col2:
    st.header("Results and Interpretation")
    st.info("The system uses Transfer Learning (VGG16) to classify the image into four categories: Glioma, Meningioma, Pituitary, or No Tumor.")

    if uploaded_file is None:
        st.warning("Please upload an image and click 'Detect Tumor' to see the results.")
brain_tumor_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d20e5a0cf49890f3b06898737a2f0a84dc9febfe70a184da563c967d7846cbf5
3
+ size 136037810
requirements.txt ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.3.1
2
+ aiohappyeyeballs==2.6.1
3
+ aiohttp==3.13.2
4
+ aiosignal==1.4.0
5
+ altair==6.0.0
6
+ annotated-types==0.7.0
7
+ anyio==4.12.0
8
+ astunparse==1.6.3
9
+ async-timeout==4.0.3
10
+ attrs==25.4.0
11
+ backoff==2.2.1
12
+ bcrypt==5.0.0
13
+ blinker==1.9.0
14
+ build==1.3.0
15
+ cachetools==6.2.2
16
+ certifi==2025.11.12
17
+ charset-normalizer==3.4.4
18
+ chromadb==1.3.6
19
+ click==8.3.1
20
+ colorama==0.4.6
21
+ coloredlogs==15.0.1
22
+ dataclasses-json==0.6.7
23
+ distro==1.9.0
24
+ durationpy==0.10
25
+ exceptiongroup==1.3.1
26
+ filelock==3.20.0
27
+ flatbuffers==25.9.23
28
+ frozenlist==1.8.0
29
+ fsspec==2025.12.0
30
+ gast==0.7.0
31
+ gitdb==4.0.12
32
+ GitPython==3.1.45
33
+ google-auth==2.43.0
34
+ google-pasta==0.2.0
35
+ googleapis-common-protos==1.72.0
36
+ greenlet==3.3.0
37
+ grpcio==1.76.0
38
+ h11==0.16.0
39
+ h5py==3.15.1
40
+ hf-xet==1.2.0
41
+ httpcore==1.0.9
42
+ httptools==0.7.1
43
+ httpx==0.28.1
44
+ httpx-sse==0.4.3
45
+ huggingface_hub==1.2.2
46
+ humanfriendly==10.0
47
+ idna==3.11
48
+ importlib_metadata==8.7.0
49
+ importlib_resources==6.5.2
50
+ Jinja2==3.1.6
51
+ jiter==0.12.0
52
+ jsonpatch==1.33
53
+ jsonpointer==3.0.0
54
+ jsonschema==4.25.1
55
+ jsonschema-specifications==2025.9.1
56
+ kagglehub==0.3.13
57
+ keras==3.12.0
58
+ kubernetes==34.1.0
59
+ langchain==1.1.3
60
+ langchain-classic==1.0.0
61
+ langchain-community==0.4.1
62
+ langchain-core==1.1.3
63
+ langchain-openai==1.1.1
64
+ langchain-text-splitters==1.0.0
65
+ langgraph==1.0.4
66
+ langgraph-checkpoint==3.0.1
67
+ langgraph-prebuilt==1.0.5
68
+ langgraph-sdk==0.2.15
69
+ langsmith==0.4.59
70
+ libclang==18.1.1
71
+ Markdown==3.10
72
+ markdown-it-py==4.0.0
73
+ MarkupSafe==3.0.3
74
+ marshmallow==3.26.1
75
+ mdurl==0.1.2
76
+ ml_dtypes==0.5.4
77
+ mmh3==5.2.0
78
+ mpmath==1.3.0
79
+ multidict==6.7.0
80
+ mypy_extensions==1.1.0
81
+ namex==0.1.0
82
+ narwhals==2.13.0
83
+ numpy==2.2.6
84
+ oauthlib==3.3.1
85
+ onnxruntime==1.23.2
86
+ openai==2.9.0
87
+ opentelemetry-api==1.39.0
88
+ opentelemetry-exporter-otlp-proto-common==1.39.0
89
+ opentelemetry-exporter-otlp-proto-grpc==1.39.0
90
+ opentelemetry-proto==1.39.0
91
+ opentelemetry-sdk==1.39.0
92
+ opentelemetry-semantic-conventions==0.60b0
93
+ opt_einsum==3.4.0
94
+ optree==0.18.0
95
+ orjson==3.11.5
96
+ ormsgpack==1.12.0
97
+ overrides==7.7.0
98
+ packaging==25.0
99
+ pandas==2.3.3
100
+ pillow==12.0.0
101
+ posthog==5.4.0
102
+ propcache==0.4.1
103
+ protobuf==6.33.2
104
+ pyarrow==22.0.0
105
+ pyasn1==0.6.1
106
+ pyasn1_modules==0.4.2
107
+ pybase64==1.4.3
108
+ pydantic==2.12.5
109
+ pydantic-settings==2.12.0
110
+ pydantic_core==2.41.5
111
+ pydeck==0.9.1
112
+ Pygments==2.19.2
113
+ pypdf==6.4.1
114
+ PyPika==0.48.9
115
+ pyproject_hooks==1.2.0
116
+ pyreadline3==3.5.4
117
+ python-dateutil==2.9.0.post0
118
+ python-dotenv==1.2.1
119
+ pytz==2025.2
120
+ PyYAML==6.0.3
121
+ referencing==0.37.0
122
+ regex==2025.11.3
123
+ requests==2.32.5
124
+ requests-oauthlib==2.0.0
125
+ requests-toolbelt==1.0.0
126
+ # huggingface-hub  (commented out: duplicate of huggingface_hub==1.2.2 above — pip normalizes '-'/'_' to the same package and rejects double requirements)
127
+ rich==14.2.0
128
+ rpds-py==0.30.0
129
+ rsa==4.9.1
130
+ shellingham==1.5.4
131
+ six==1.17.0
132
+ smmap==5.0.2
133
+ sniffio==1.3.1
134
+ SQLAlchemy==2.0.45
135
+ streamlit==1.52.1
136
+ sympy==1.14.0
137
+ tenacity==9.1.2
138
+ tensorboard==2.20.0
139
+ tensorboard-data-server==0.7.2
140
+ tensorflow==2.20.0
141
+ termcolor==3.2.0
142
+ tiktoken==0.12.0
143
+ tokenizers==0.22.1
144
+ toml==0.10.2
145
+ tomli==2.3.0
146
+ tornado==6.5.3
147
+ tqdm==4.67.1
148
+ typer==0.20.0
149
+ typer-slim==0.20.0
150
+ typing-inspect==0.9.0
151
+ typing-inspection==0.4.2
152
+ typing_extensions==4.15.0
153
+ tzdata==2025.2
154
+ urllib3==2.3.0
155
+ uuid_utils==0.12.0
156
+ uvicorn==0.38.0
157
+ watchdog==6.0.0
158
+ watchfiles==1.1.1
159
+ websocket-client==1.9.0
160
+ websockets==15.0.1
161
+ Werkzeug==3.1.4
162
+ wrapt==2.0.1
163
+ xxhash==3.6.0
164
+ yarl==1.22.0
165
+ zipp==3.23.0
166
+ zstandard==0.25.0
train_model.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train a VGG16 transfer-learning classifier for brain-tumor MRI scans.

Reads images from dataset/Training/<class>/, trains a small dense head on
top of a frozen ImageNet-pretrained VGG16 base, and saves the result as
brain_tumor_model.keras in the working directory.
"""
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
import os

DATA_DIR = 'dataset/Training'
IMAGE_SIZE = (224, 224)  # standard input size for VGG16
BATCH_SIZE = 32
NUM_CLASSES = 4  # glioma_tumor, meningioma_tumor, no_tumor, pituitary_tumor

# Pixel values scaled to [0, 1]; 20% of the data is held out for validation.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2,
)

train_generator = train_datagen.flow_from_directory(
    DATA_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',  # one-hot labels for the 4 classes
    subset='training'
)

validation_generator = train_datagen.flow_from_directory(
    DATA_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset='validation'
)

# ImageNet-pretrained VGG16 convolutional base, without its classifier head.
base_model = VGG16(weights='imagenet',
                   include_top=False,
                   input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))

# Freeze the base so only the new dense head below is trained.
for layer in base_model.layers:
    layer.trainable = False

model = Sequential([
    base_model,
    Flatten(),  # flatten the 2D feature maps into a 1D vector
    Dense(256, activation='relu'),  # first hidden layer
    Dropout(0.5),  # regularization to reduce overfitting
    Dense(NUM_CLASSES, activation='softmax')  # output layer (4 classes)
])

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()

print("\n--- Starting Model Training ---")

history = model.fit(
    train_generator,
    epochs=10,
    validation_data=validation_generator
)

MODEL_FILENAME = 'brain_tumor_model.keras'
model.save(MODEL_FILENAME)
print(f"\nModel successfully saved as: {MODEL_FILENAME}")
+ print(f"\nModel successfully saved as: {MODEL_FILENAME}")