ChAbhishek28 commited on
Commit
f51ed56
·
1 Parent(s): f7aba3b

Deploy VCE backend with Docker

Browse files
Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Container image for the VCE Flask backend, served by gunicorn.
FROM python:3.10-slim

WORKDIR /app

# Install system dependencies (shared libraries OpenCV needs at import time)
RUN apt-get update && apt-get install -y \
    libgl1-mesa-glx \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements and install Python dependencies first so this layer is
# cached when only application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files
COPY . .

# Create uploads directory (app.py also creates it defensively at startup)
RUN mkdir -p uploads

# Expose port (7860 is the port Hugging Face Spaces expects)
EXPOSE 7860

# Set environment variable for Hugging Face
# NOTE(review): the app is Flask + gunicorn, not Gradio; these two variables
# appear unused by the code in this repo — confirm before removing.
ENV GRADIO_SERVER_NAME="0.0.0.0"
ENV GRADIO_SERVER_PORT=7860

# Run the application: one worker because the TensorFlow model is loaded at
# import time; a long timeout covers slow model startup/inference.
CMD ["gunicorn", "app:app", "--bind", "0.0.0.0:7860", "--timeout", "300", "--workers", "1", "--threads", "2"]
README.md CHANGED
@@ -1,10 +1,12 @@
1
  ---
2
- title: VCE
3
- emoji: 🐢
4
- colorFrom: gray
5
  colorTo: blue
6
  sdk: docker
7
  pinned: false
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
+ title: VCE Medical Diagnosis Backend
3
+ emoji: 🏥
4
+ colorFrom: green
5
  colorTo: blue
6
  sdk: docker
7
  pinned: false
8
  ---
9
 
10
+ # VCE Medical Diagnosis Backend
11
+
12
+ AI-powered medical image analysis backend using TensorFlow and LinkNet model for video capsule endoscopy image segmentation.
__pycache__/modddel.cpython-310.pyc ADDED
Binary file (2.83 kB). View file
 
__pycache__/modddel.cpython-311.pyc ADDED
Binary file (5.81 kB). View file
 
__pycache__/predict.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
__pycache__/predict.cpython-311.pyc ADDED
Binary file (1.85 kB). View file
 
app.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Flask application setup -------------------------------------------------
from flask import Flask, request, jsonify, send_from_directory
from werkzeug.utils import secure_filename
from flask_cors import CORS  # cross-origin support for the frontend
import os
import sys

# Log to stderr so progress is visible in container logs while the slow
# TensorFlow import below is still running.
print("Starting Flask app...", file=sys.stderr)
print("Loading model...", file=sys.stderr)

# Importing modddel builds the network and loads weights as a side effect.
from modddel import model
from predict import read_image_
from predict import display_segmentation
from PIL import Image
from modddel import np

print("Model loaded successfully!", file=sys.stderr)

app = Flask(__name__)
CORS(app)  # enable CORS for all routes

# Uploaded images (and their segmented overlays) live in this folder.
UPLOAD_FOLDER = 'uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Make sure the upload folder exists before the first request arrives.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
28
+
29
@app.route('/', methods=['GET'])
def health_check():
    """Liveness probe: confirm the API process is up and serving."""
    payload = {'status': 'healthy', 'message': 'VCE Backend API is running'}
    return jsonify(payload), 200
32
+
33
@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept an uploaded image, run segmentation, and overwrite the saved
    file with the overlay image.

    Returns JSON ``{'file_name': <name>}`` on success (200); the processed
    image can then be fetched from ``/uploads/<name>``. Errors are reported
    as ``{'error': <message>}`` with status 400.
    """
    try:
        # Reject requests that do not carry an 'image' file part.
        # (Original returned this error with an implicit 200 status.)
        if 'image' not in request.files:
            return jsonify({'error': 'No file part'}), 400

        image_file = request.files['image']

        # secure_filename() strips path components and unsafe characters;
        # it can return '' (e.g. for an empty or fully-unsafe name), which
        # would otherwise make file_path point at the uploads directory.
        filename = secure_filename(image_file.filename or '')
        if not filename:
            return jsonify({'error': 'No file selected'}), 400

        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        image_file.save(file_path)

        # Run the model and overwrite the upload with the overlay result.
        img = display_segmentation(file_path, model)
        img_clipped = np.clip(img, 0, 1)  # overlay drawing can exceed [0, 1]
        Image.fromarray((img_clipped * 255).astype(np.uint8)).save(file_path)

        # Return the name clients should use against /uploads/<filename>.
        return jsonify({'file_name': filename}), 200

    except Exception as e:
        # Surface any processing failure to the client as a 400.
        return jsonify({'error': str(e)}), 400
55
+
56
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously processed image out of the uploads directory."""
    folder = app.config['UPLOAD_FOLDER']
    return send_from_directory(folder, filename)
59
+
60
if __name__ == '__main__':
    # Local development entry point; production runs under gunicorn
    # (see Dockerfile / render.yaml).
    listen_port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=listen_port, debug=False)
linknet_test.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cd5dc70486097b7f67ac4576c811fb22b9a40adece95a135482762716c24b63
3
+ size 30012056
main.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
def main():
    """Print a simple greeting; placeholder smoke-test entry point."""
    greeting = "Hello from backend!"
    print(greeting)


if __name__ == "__main__":
    main()
modddel.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np # still fine if you use it elsewhere
2
+ import tensorflow as tf
3
+ from tensorflow import keras
4
+ from tensorflow.keras.layers import (
5
+ Activation,
6
+ Add,
7
+ BatchNormalization,
8
+ Concatenate,
9
+ Conv2D,
10
+ Input,
11
+ MaxPooling2D,
12
+ UpSampling2D,
13
+ )
14
+ from tensorflow.keras.models import Model
15
+
16
+
17
def conv_block(inputs, filters, kernel_size=3, strides=1):
    """Conv2D ('same' padding) -> BatchNorm -> ReLU."""
    out = Conv2D(filters, kernel_size, strides=strides, padding="same")(inputs)
    out = BatchNormalization()(out)
    out = Activation("relu")(out)
    return out
22
+
23
+
24
def encoder_block(inputs, filters, kernel_size=3, strides=1):
    """Residual encoder stage: two conv blocks plus a projected shortcut."""
    main_path = conv_block(inputs, filters, kernel_size, strides)
    main_path = conv_block(main_path, filters, kernel_size, 1)
    # 1x1 projection so the shortcut matches the main path's channels/stride.
    skip = Conv2D(filters, kernel_size=1, strides=strides, padding="same")(inputs)
    skip = BatchNormalization()(skip)
    merged = Add()([main_path, skip])
    return Activation("relu")(merged)
32
+
33
+
34
def decoder_block(inputs, filters, kernel_size=3, strides=1):
    """Residual decoder stage: 2x upsample on both main path and shortcut.

    NOTE: ``strides`` is accepted for signature symmetry with encoder_block
    but is not used inside the block (matches original behavior).
    """
    main_path = UpSampling2D(size=(2, 2))(inputs)
    main_path = conv_block(main_path, filters, kernel_size, 1)
    main_path = conv_block(main_path, filters, kernel_size, 1)
    # Shortcut: upsample then 1x1 projection to the target channel count.
    skip = UpSampling2D(size=(2, 2))(inputs)
    skip = Conv2D(filters, kernel_size=1, strides=1, padding="same")(skip)
    skip = BatchNormalization()(skip)
    merged = Add()([main_path, skip])
    return Activation("relu")(merged)
44
+
45
+
46
def linknet(input_shape=(224, 224, 3), num_classes=1):
    """Build a LinkNet-style encoder/decoder segmentation network.

    Returns an uncompiled Keras Model mapping ``input_shape`` images to
    per-pixel sigmoid scores with ``num_classes`` channels.
    """
    inputs = Input(shape=input_shape)

    # Encoder: four residual stages, each halving spatial resolution.
    e1 = encoder_block(inputs, 64, strides=2)
    e2 = encoder_block(e1, 128, strides=2)
    e3 = encoder_block(e2, 256, strides=2)
    e4 = encoder_block(e3, 512, strides=2)

    # Decoder: four upsampling stages back to the input resolution.
    # NOTE(review): no encoder->decoder skip connections are used, unlike
    # canonical LinkNet — confirm this is intentional.
    d4 = decoder_block(e4, 256, strides=2)
    d3 = decoder_block(d4, 128, strides=2)
    d2 = decoder_block(d3, 64, strides=2)
    d1 = decoder_block(d2, 64, strides=2)

    scores = Conv2D(num_classes, (1, 1), activation="sigmoid")(d1)
    return Model(inputs, scores)
65
+
66
+
67
@keras.utils.register_keras_serializable()
def iou(y_true, y_pred):
    """Soft intersection-over-union metric, smoothed by 1e-15.

    Registered with Keras so models saved with this metric reload cleanly.
    """
    # Clamp predictions into [0, 1] before computing overlap.
    y_pred = tf.clip_by_value(y_pred, 0.0, 1.0)
    overlap = tf.reduce_sum(y_true * y_pred)
    total = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) - overlap
    return (overlap + 1e-15) / (total + 1e-15)
77
+
78
+
79
@keras.utils.register_keras_serializable()
def dice_coefficient(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient: (2*overlap + smooth) / (sums + smooth)."""
    y_pred = tf.clip_by_value(y_pred, 0.0, 1.0)
    overlap = tf.reduce_sum(y_true * y_pred)
    denom = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred)
    return (2.0 * overlap + smooth) / (denom + smooth)
86
+
87
+
88
@keras.utils.register_keras_serializable()
def dice_coefficient_loss(y_true, y_pred):
    """Loss form of the Dice coefficient: 1 - dice."""
    return 1.0 - dice_coefficient(y_true, y_pred)
91
+
92
+
93
# Build the network and load pre-trained weights at import time, so that
# `from modddel import model` hands callers a ready-to-predict model.
lr = 1e-4
model = linknet()
opt = tf.keras.optimizers.Adam(learning_rate=lr)
metrics = [
    "acc",
    tf.keras.metrics.Recall(),
    tf.keras.metrics.Precision(),
    iou,
    dice_coefficient,
]
# Compiling registers the custom metrics on the model; inference in this
# repo only calls model.predict(), so the loss/optimizer are otherwise unused.
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=metrics)

# Weights file is tracked via Git LFS (linknet_test.h5, ~30 MB).
model.load_weights("linknet_test.h5")
model_infer.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pickle

# BUG FIX: the original did `from predict import plt`, but predict.py never
# imports or defines `plt`, so that line raised ImportError. Import
# matplotlib directly instead.
import matplotlib.pyplot as plt

from predict import cv2
from predict import read_image_
from modddel import np

# SECURITY NOTE: pickle.load executes arbitrary code embedded in the file —
# only load model.pkl from a trusted source.
with open('model.pkl', 'rb') as f:
    model = pickle.load(f)
8
+
9
def display_segmentation(image_path, model):
    """Segment the image at ``image_path`` and return a copy with the
    predicted region's boundary traced in white.

    NOTE(review): unlike predict.display_segmentation, this variant does not
    alpha-blend the overlay back into the original image.
    """
    image = read_image_(image_path)

    # Binarize the model's sigmoid output at 0.5, then match the image size.
    mask = model.predict(np.expand_dims(image, axis=0))[0] > 0.5
    mask = cv2.resize(mask.astype(np.uint8), (image.shape[1], image.shape[0]))

    annotated = image.copy()

    # Trace the outer boundary of the mask with a thin white line.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(annotated, contours, -1, (255, 255, 255), thickness=1)

    return annotated
25
+
26
# Example usage:
# NOTE(review): this block runs at *import* time and requires "img- (1).png"
# to exist in the working directory — consider guarding it with
# `if __name__ == "__main__":` so importing this module has no side effects.
image_path = "img- (1).png"
segmentation_overlay = display_segmentation(image_path, model)

# Display the resulting image with segmentation overlay
plt.imshow(segmentation_overlay)
plt.title("Segmentation Overlay")
plt.show()
predict.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from modddel import model
2
+
3
+ import numpy as np
4
+ import cv2
5
+
6
+
7
def read_image_(path):
    """Read an image file as an RGB float array scaled to [0, 1] and resized
    to the model's 224x224 input resolution.

    Raises FileNotFoundError if the file is missing or cannot be decoded.
    """
    x = cv2.imread(path)
    if x is None:
        # cv2.imread signals failure by returning None instead of raising;
        # fail loudly here rather than with a cryptic cvtColor error below.
        raise FileNotFoundError(f"Cannot read image: {path}")
    x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)  # OpenCV decodes as BGR
    x = cv2.resize(x, (224, 224))
    x = x / 255.0
    return x
15
+
16
def display_segmentation(image_path, model):
    """Run the model on the image at ``image_path`` and return the image
    with the predicted region outlined and a light whitish tint blended in.
    """
    image = read_image_(image_path)

    # Threshold the sigmoid output at 0.5 for a binary mask, resized to the
    # image dimensions.
    mask = model.predict(np.expand_dims(image, axis=0))[0] > 0.5
    mask = cv2.resize(mask.astype(np.uint8), (image.shape[1], image.shape[0]))

    overlay = image.copy()

    # Outline the segmented region in white with a thin (1 px) line.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(overlay, contours, -1, (255, 255, 255), thickness=1)

    # Blend the annotated overlay back into the original for a soft tint;
    # cv2.addWeighted writes its result into `image` (the dst argument).
    alpha = 0.3
    cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)

    return image
37
+
38
+
39
+
pyproject.toml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
[project]
name = "backend"
version = "0.1.0"
description = "VCE medical image segmentation backend (Flask + TensorFlow LinkNet)"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
    "flask>=3.1.2",
    "flask-cors>=6.0.1",
    # Pinned to match requirements.txt; the previous numpy==1.21.3 pin
    # conflicted with the 1.23.5 used at deploy time.
    "numpy==1.23.5",
    "opencv-python>=4.6.0.66",
    # requirements.txt ships Pillow 10.4.0, so the earlier >=12.0.0 floor
    # was unsatisfiable alongside it.
    "pillow>=10.4.0",
    "tensorflow==2.10.1",
    # WSGI server used by the Dockerfile CMD and render.yaml startCommand.
    "gunicorn>=21.2.0",
]
render.yaml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Render.com deployment configuration for the VCE backend web service.
services:
  - type: web
    name: ai-vce-backend
    env: python
    buildCommand: "pip install -r requirements.txt"
    # Single --preload worker: the TensorFlow model is loaded once at import
    # time; the long timeout covers slow startup and inference.
    startCommand: "gunicorn app:app --bind 0.0.0.0:$PORT --timeout 180 --workers 1 --preload"
    envVars:
      - key: PYTHON_VERSION
        value: 3.10.15
    # Persistent disk mounted over the uploads folder so processed images
    # survive restarts.
    disk:
      name: uploads-disk
      mountPath: /opt/render/project/src/uploads
      sizeGB: 1
requirements.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Core ML stack
3
+ tensorflow==2.10.1
4
+ keras==2.10.0
5
+ numpy==1.23.5
6
+ h5py==3.7.0
7
+
8
+ # Computer Vision
9
+ opencv-python-headless==4.10.0.84
10
+ Pillow==10.4.0
11
+
12
+ # Scientific utilities
13
+ matplotlib==3.7.4
14
+ scipy==1.10.1
15
+
16
+ # Flask Backend
17
+ Flask==3.0.0
18
+ flask-cors==4.0.0
19
+ Werkzeug==3.0.1
20
+ gunicorn==21.2.0
21
+
22
+ # Image handling
23
+ scikit-image==0.22.0
uploads/.gitkeep ADDED
@@ -0,0 +1 @@
 
 
1
+ # Keep this directory in git but ignore its contents
uploads/98677e90349dff1d04519b1cebc0d02a.jpg ADDED
uploads/WhatsApp_Image_2025-11-17_at_16.30.26.jpeg ADDED
uploads/anm7091.jpg ADDED
uploads/icon-removebg-preview1.png ADDED
uploads/img-_1.png ADDED
uv.lock ADDED
The diff for this file is too large to render. See raw diff