DarshanM0di committed on
Commit
815a66f
·
verified ·
1 Parent(s): ee7a5ca

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -0
app.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ from PIL import Image
4
+ import tensorflow as tf
5
+ from safetensors import safe_open
6
+
7
# ✅ Constants
IMG_SIZE = 224  # input resolution fed to the network (224x224 RGB)
CLASS_NAMES = ["Fractured", "Non-Fractured"]  # index order of the softmax outputs
SAFETENSOR_PATH = "osteologic.safetensors"  # weights file read by load_weights()
11
+
12
# ✅ Step 1: Rebuild architecture
def build_model():
    """Recreate the MobileNetV2-based classifier; weights are loaded separately.

    Returns an uncompiled tf.keras.Model mapping (IMG_SIZE, IMG_SIZE, 3)
    images to a softmax over CLASS_NAMES.
    """
    image_input = tf.keras.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    # weights=None: architecture only — parameters come from the safetensors file.
    backbone = tf.keras.applications.MobileNetV2(
        weights=None, include_top=False, input_tensor=image_input
    )
    features = tf.keras.layers.GlobalAveragePooling2D()(backbone.output)
    hidden = tf.keras.layers.Dense(
        128,
        activation="relu",
        kernel_regularizer=tf.keras.regularizers.l2(0.001),
    )(features)
    hidden = tf.keras.layers.Dropout(0.5)(hidden)
    probs = tf.keras.layers.Dense(len(CLASS_NAMES), activation="softmax")(hidden)
    return tf.keras.Model(image_input, probs)
23
+
24
# ✅ Step 2: Load weights from .safetensors
def load_weights(model, path=SAFETENSOR_PATH):
    """Copy Conv2D/Dense weights from a .safetensors file into *model*.

    Args:
        model: Keras model whose layer names match the "<name>.weight" /
            "<name>.bias" keys stored in the file.
        path: path to the .safetensors file (defaults to SAFETENSOR_PATH).

    Returns:
        The same model, with matching layers updated in place. Layers whose
        keys are absent from the file are left untouched.
    """
    # BUGFIX: framework="np" yields numpy arrays, which Keras set_weights()
    # accepts directly. The previous framework="pt" returned torch tensors,
    # and torch.Tensor.transpose() takes exactly two dims, so the 4-axis
    # transpose below raised at runtime.
    with safe_open(path, framework="np") as f:
        available = set(f.keys())  # hoist: f.keys() is invariant across layers
        for layer in model.layers:
            if not isinstance(layer, (tf.keras.layers.Conv2D, tf.keras.layers.Dense)):
                continue
            w_key = f"{layer.name}.weight"
            b_key = f"{layer.name}.bias"
            if w_key in available and b_key in available:
                weights = f.get_tensor(w_key)
                bias = f.get_tensor(b_key)
                if isinstance(layer, tf.keras.layers.Conv2D):
                    # PyTorch conv kernels are [out, in, h, w]; TF wants [h, w, in, out].
                    weights = weights.transpose(2, 3, 1, 0)
                # NOTE(review): Dense kernels exported from a torch state_dict
                # are [out, in] while Keras expects [in, out]; if this file was
                # written from torch, Dense weights would also need a .T —
                # confirm against the export script.
                layer.set_weights([weights, bias])
    return model
39
+
40
# ✅ Step 3: Build and load model (runs once at import/startup)
model = load_weights(build_model())
43
+
44
# ✅ Step 4: Prediction function
def predict(image: Image.Image) -> str:
    """Classify a radiograph and return "<label> (<confidence>)".

    Args:
        image: PIL image from the Gradio upload widget (any mode).

    Returns:
        A string of the winning class name and its rounded probability.
    """
    # FIX: convert to RGB *before* resizing so palette ("P"), grayscale and
    # RGBA uploads are handled correctly; resizing first can distort
    # palette images and leaves a wrong channel count until conversion.
    image = image.convert("RGB").resize((IMG_SIZE, IMG_SIZE))
    arr = np.asarray(image, dtype=np.float32) / 255.0  # scale pixels to [0, 1]
    arr = arr[np.newaxis, ...]  # add batch dimension -> (1, IMG_SIZE, IMG_SIZE, 3)
    preds = model.predict(arr)[0]
    label = CLASS_NAMES[int(np.argmax(preds))]
    confidence = round(float(np.max(preds)), 3)
    return f"{label} ({confidence})"
53
+
54
# ✅ Step 5: Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="Upload Radiograph"),
    outputs=gr.Text(label="Prediction"),
    title="🦴 OsteoLogic Fracture Detector",
    description="Upload a radiograph to detect fractures using safetensors-powered MobileNetV2.",
)

# Start the web UI (blocks until the server is stopped).
demo.launch()