sumoy47 committed on
Commit
e30002d
·
verified ·
1 Parent(s): f5f442d

Deployed backend

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +21 -0
  3. autism_model.keras +3 -0
  4. main.py +188 -0
  5. requirements.txt +9 -0
  6. scaler.pkl +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ autism_model.keras filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Base image: Python 3.9
FROM python:3.9

# All build and runtime paths are relative to /code
WORKDIR /code

# Install dependencies first so this layer is cached across code-only changes
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Run as a non-root user (security requirement for Hugging Face Spaces)
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Application code, owned by the runtime user
COPY --chown=user . /code

# Hugging Face Spaces serves HTTP on port 7860
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
autism_model.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27ad2ec4570baaa7c8f24ab8816b8dbe4dd5f1720e5c223c8c24cba9d5ca0297
3
+ size 36319656
main.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import base64
import io
import json

# Third-party
import cv2
import joblib
import numpy as np
import tensorflow as tf
import uvicorn
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.middleware.cors import CORSMiddleware
from PIL import Image
from tensorflow.keras import layers, models, applications, Input, regularizers

# --- 0. SETUP ---
# Open CORS policy: the API is consumed by browser front-ends on other origins.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
22
+
23
# --- 1. DEFINE ARCHITECTURE (Exact Match to Training) ---
def cbam_block(x, ratio=8):
    """Apply a CBAM attention block (channel attention, then spatial attention).

    Args:
        x: 4D feature-map tensor of shape (batch, H, W, C).
        ratio: channel-reduction ratio of the shared attention MLP.

    Returns:
        A tensor with the same shape as ``x``, reweighted by both gates.
    """
    n_channels = x.shape[-1]

    # Channel attention: one shared two-layer MLP scores both the
    # average-pooled and the max-pooled channel descriptors.
    squeeze = layers.Dense(n_channels // ratio, activation="relu", use_bias=False)
    excite = layers.Dense(n_channels, use_bias=False)
    avg_descriptor = excite(squeeze(layers.GlobalAveragePooling2D()(x)))
    max_descriptor = excite(squeeze(layers.GlobalMaxPooling2D()(x)))
    channel_gate = layers.Activation('sigmoid')(layers.Add()([avg_descriptor, max_descriptor]))
    channel_gate = layers.Reshape((1, 1, n_channels))(channel_gate)
    x = layers.Multiply()([x, channel_gate])

    # Spatial attention (FIXED: Lambda reductions keep shapes identical to
    # the ones produced during training). Each map collapses channels to 1,
    # giving (H, W, 1) tensors.
    mean_map = layers.Lambda(lambda t: tf.reduce_mean(t, axis=-1, keepdims=True))(x)
    peak_map = layers.Lambda(lambda t: tf.reduce_max(t, axis=-1, keepdims=True))(x)

    stacked = layers.Concatenate(axis=-1)([mean_map, peak_map])  # shape (H, W, 2)
    spatial_gate = layers.Conv2D(1, 7, padding='same', activation='sigmoid', use_bias=False)(stacked)

    return layers.Multiply()([x, spatial_gate])
45
+
46
@tf.keras.utils.register_keras_serializable()
class TransformerBlock(layers.Layer):
    """Post-norm transformer encoder block: MHA + FFN, each with a residual
    connection, dropout, and layer normalization.

    Args:
        embed_dim: token embedding size (also the attention key dim).
        num_heads: number of attention heads.
        ff_dim: hidden width of the position-wise feed-forward network.
        rate: dropout rate applied after attention and after the FFN.
    """

    def __init__(self, embed_dim=64, num_heads=4, ff_dim=128, rate=0.1, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.ff_dim = ff_dim
        self.rate = rate
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = models.Sequential([layers.Dense(ff_dim, "relu"), layers.Dense(embed_dim)])
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training=None):
        # FIX: default was training=True, which kept dropout active on direct
        # model(x) calls at inference time; None lets Keras resolve the phase.
        out1 = self.layernorm1(inputs + self.dropout1(self.att(inputs, inputs), training=training))
        return self.layernorm2(out1 + self.dropout2(self.ffn(out1), training=training))

    def get_config(self):
        # FIX: the class is registered as serializable but had no get_config,
        # so saving/loading a full model would drop these hyperparameters.
        config = super().get_config()
        config.update({
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "ff_dim": self.ff_dim,
            "rate": self.rate,
        })
        return config
64
+
65
def build_model_local():
    """Rebuild the exact two-branch training architecture.

    Visual branch: EfficientNetB0 backbone (last 20 layers trainable) + CBAM,
    pooled into a 128-d vector. Tabular branch: 14 features projected to a
    (14, 64) token sequence, run through one TransformerBlock, pooled into a
    128-d vector. The two vectors are concatenated and classified into a
    single sigmoid output named 'diagnosis'.

    Returns:
        An uncompiled tf.keras Model with inputs [image_input, tabular_input].
    """
    # --- Visual branch ---
    image_input = Input(shape=(224, 224, 3), name='image_input')
    backbone = applications.EfficientNetB0(include_top=False, weights='imagenet', input_tensor=image_input)
    # Freeze everything except the last 20 backbone layers.
    for frozen in backbone.layers[:-20]:
        frozen.trainable = False

    vis = cbam_block(backbone.output)
    vis = layers.GlobalAveragePooling2D()(vis)
    image_vector = layers.Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.0001))(vis)

    # --- Tabular branch ---
    n_features = 14
    tabular_input = Input(shape=(n_features,), name='tabular_input')
    tab = layers.Dense(n_features * 64)(tabular_input)
    tab = layers.Reshape((n_features, 64))(tab)  # one 64-d token per feature
    tab = TransformerBlock(embed_dim=64, num_heads=4, ff_dim=128, rate=0.3)(tab)
    tab = layers.GlobalAveragePooling1D()(tab)
    tabular_vector = layers.Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.0001))(tab)

    # --- Fusion head ---
    fused = layers.Concatenate()([image_vector, tabular_vector])
    head = layers.Dense(64, activation='relu')(fused)
    head = layers.Dropout(0.4)(head)
    diagnosis = layers.Dense(1, activation='sigmoid', name='diagnosis')(head)

    return models.Model(inputs=[image_input, tabular_input], outputs=diagnosis)
92
+
93
# --- 2. LOAD ASSETS ---
# Load the feature scaler and model weights at import time. On failure both
# stay None and /predict returns an error payload instead of crashing.
print("⏳ Loading Assets...")
model = None
scaler = None

try:
    # A. Feature scaler fitted during training
    scaler = joblib.load("scaler.pkl")
    print(" ✅ Scaler Loaded.")

    # B. Rebuild the architecture, then load the trained weights into it
    #    (shapes now match, e.g. (7,7,2,1) -> (7,7,2,1)).
    model = build_model_local()
    model.load_weights("autism_model.keras")
    print(" ✅ Model Weights Loaded.")

except Exception as e:
    print(f"\n❌ CRITICAL ERROR: {e}\n")
111
+
112
# --- 3. HELPER FUNCTIONS ---
def generate_gradcam(img_array):
    """Compute a Grad-CAM heatmap for the module-level ``model``.

    Args:
        img_array: preprocessed image batch, shape (1, 224, 224, 3).

    Returns:
        A 2D numpy array of activations in [0, 1] (the spatial size of the
        last conv feature map), or a 224x224 zero array when the model is
        unavailable or no suitable layer is found.
    """
    if model is None:
        return np.zeros((224, 224))

    # Robust layer detection: take the deepest layer with a 4D output
    # (the last convolutional feature map).
    target_layer = None
    for layer in reversed(model.layers):
        try:
            if len(layer.output.shape) == 4:
                target_layer = layer.name
                break
        except Exception:  # FIX: was a bare except; some layers lack .output
            continue
    if target_layer is None:
        # FIX: previously fell through to get_layer(None) and crashed.
        return np.zeros((224, 224))

    grad_model = tf.keras.models.Model(
        inputs=model.inputs,
        outputs=[model.get_layer(target_layer).output, model.output]
    )

    with tf.GradientTape() as tape:
        img_tensor = tf.cast(img_array, tf.float32)
        # The tabular branch is not attributed; feed zeros for its input.
        dummy_tab = tf.zeros((1, 14), dtype=tf.float32)
        conv_out, preds = grad_model([img_tensor, dummy_tab])
        loss = preds[:, 0]

    grads = tape.gradient(loss, conv_out)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    heatmap = conv_out[0] @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)
    heatmap = tf.maximum(heatmap, 0)
    peak = tf.math.reduce_max(heatmap)
    # FIX: dividing by a zero peak produced an all-NaN heatmap downstream.
    if peak > 0:
        heatmap = heatmap / peak
    return heatmap.numpy()
143
+
144
@app.post("/predict")
async def predict(file: UploadFile = File(...), patient_data: str = Form(...)):
    """Run the multimodal autism-risk model on one image + questionnaire.

    Args:
        file: uploaded image (any PIL-readable format; resized to 224x224).
        patient_data: JSON string with keys A1..A10, Age, Sex, Jaundice, FamHx.

    Returns:
        dict with ``risk_score`` (float), ``diagnosis`` (str), and
        ``xai_image`` (Grad-CAM overlay as a base64 JPEG data URI) — or a
        dict with a single ``error`` key on bad input / failed startup.
    """
    if model is None or scaler is None:
        return {"error": "Server initialization failed."}

    # --- Process Image ---
    img_bytes = await file.read()
    try:
        image = Image.open(io.BytesIO(img_bytes)).convert("RGB")
    except Exception:
        # FIX: a non-image upload previously raised an unhandled 500.
        return {"error": "Invalid image file."}
    image = image.resize((224, 224))
    img_array = np.array(image)
    # NOTE(review): /255.0 scaling assumes the model was trained on [0,1]
    # inputs — confirm against the training pipeline.
    img_input = np.expand_dims(img_array / 255.0, axis=0)

    # --- Process Tabular ---
    feature_keys = ('A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9',
                    'A10', 'Age', 'Sex', 'Jaundice', 'FamHx')
    try:
        data = json.loads(patient_data)
        features = [data[k] for k in feature_keys]
    except (json.JSONDecodeError, KeyError, TypeError) as e:
        # FIX: malformed JSON or a missing key previously raised an
        # unhandled 500 instead of a structured error.
        return {"error": f"Invalid patient_data: {e}"}
    tab_input = scaler.transform(np.array([features]))

    # --- Predict ---
    prediction = model.predict([img_input, tab_input])
    risk_score = float(prediction[0][0])

    # --- XAI overlay ---
    heatmap = generate_gradcam(img_input)
    # FIX: sanitize NaNs so np.uint8 conversion cannot produce garbage.
    heatmap = np.nan_to_num(heatmap, nan=0.0)
    heatmap_uint8 = np.uint8(255 * heatmap)
    jet = cv2.applyColorMap(heatmap_uint8, cv2.COLORMAP_JET)
    jet = cv2.resize(jet, (224, 224))
    original_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    superimposed = cv2.addWeighted(original_cv, 0.6, jet, 0.4, 0)

    _, buffer = cv2.imencode('.jpg', superimposed)
    xai_b64 = base64.b64encode(buffer).decode('utf-8')

    return {
        "risk_score": risk_score,
        # Decision threshold 0.40 (tuned below 0.5; presumably for recall —
        # TODO confirm against validation metrics).
        "diagnosis": "Autistic" if risk_score > 0.40 else "Non-Autistic",
        "xai_image": f"data:image/jpeg;base64,{xai_b64}"
    }
185
+
186
if __name__ == "__main__":
    # Hugging Face Spaces routes traffic to port 7860 — do not change it.
    uvicorn.run(app, host="0.0.0.0", port=7860)
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ tensorflow
4
+ scikit-learn==1.2.2
5
+ pandas
6
+ joblib
7
+ pillow
8
+ opencv-python-headless
9
+ python-multipart
scaler.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37558bfbe5818cfbbfcb4609648cecef1aecdef988ba37cd936816b615683598
3
+ size 120551