File size: 2,712 Bytes
07de1be
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
from flask import Flask, render_template, request, jsonify
import cv2
import mediapipe as mp
import numpy as np
import statistics
import pyrealsense2 as rs
from sklearn.linear_model import LinearRegression

app = Flask(__name__)

# Initialize MediaPipe Face Mesh once at import time so it is reused across
# requests. static_image_mode=False enables landmark tracking between video
# frames; only a single face is detected, at a 0.7 confidence threshold.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.7)

def initialize_realsense():
    """Start and return a RealSense pipeline.

    Streams 640x480 color (BGR8) and 640x480 depth (Z16), both at 30 FPS.
    The caller is responsible for calling pipeline.stop() when done.
    """
    stream_config = rs.config()
    stream_config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    stream_config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    camera_pipeline = rs.pipeline()
    camera_pipeline.start(stream_config)
    return camera_pipeline

def process_frame(color_image, depth_frame):
    """Locate the inner upper/lower lip landmarks in a BGR frame.

    Args:
        color_image: BGR image as a numpy array (H, W, 3).
        depth_frame: RealSense depth frame; accepted for interface parity
            but not read in this function.

    Returns:
        A pair of (x, y) pixel-coordinate tuples for the upper and lower
        inner-lip landmarks, or (None, None) when no face is detected.
    """
    results = face_mesh.process(cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB))

    if not results.multi_face_landmarks:
        return None, None

    # Only the first detected face matters (max_num_faces=1 above).
    landmarks = results.multi_face_landmarks[0].landmark
    upper = landmarks[13]  # MediaPipe Face Mesh index 13: inner upper lip
    lower = landmarks[14]  # MediaPipe Face Mesh index 14: inner lower lip

    height, width = color_image.shape[:2]
    upper_px = (int(upper.x * width), int(upper.y * height))
    lower_px = (int(lower.x * width), int(lower.y * height))
    return upper_px, lower_px

@app.route('/')
def home():
    """Serve the application's main page."""
    page = render_template('index.html')
    return page

@app.route('/analyze', methods=['POST'])
def analyze():
    """Capture one RealSense frameset, detect the lip landmarks, and return
    the lip-opening distance plus its stage as JSON.

    Returns:
        JSON: {'distance': float, 'stage': str, 'status': 'success'} on
        success, or {'status': 'error', 'message': str} when the camera
        yields no usable frames or no face is detected.
    """
    pipeline = initialize_realsense()
    try:
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()

        # A frameset can arrive without one of the streams; bail out cleanly
        # instead of crashing inside np.asanyarray / the camera SDK.
        if not color_frame or not depth_frame:
            return jsonify({'status': 'error', 'message': 'Camera frames unavailable'})

        color_image = np.asanyarray(color_frame.get_data())
        upper_lip_coords, lower_lip_coords = process_frame(color_image, depth_frame)

        if upper_lip_coords and lower_lip_coords:
            distance = calculate_distance(upper_lip_coords, lower_lip_coords, depth_frame)
            stage = determine_stage(distance)

            return jsonify({
                'distance': float(distance),
                'stage': stage,
                'status': 'success'
            })

        return jsonify({'status': 'error', 'message': 'No face detected'})

    finally:
        # Always release the camera so subsequent requests can reopen it.
        pipeline.stop()

def calculate_distance(upper_lip_coords, lower_lip_coords, depth_frame):
    """Return the Euclidean distance between the two lip landmarks, in pixels.

    The previous stub returned None, which made analyze() raise TypeError at
    float(distance); this implements the measurement so the endpoint works.

    Args:
        upper_lip_coords: (x, y) pixel coordinates of the upper lip landmark.
        lower_lip_coords: (x, y) pixel coordinates of the lower lip landmark.
        depth_frame: RealSense depth frame, accepted for future depth-aware
            scaling (e.g. rs2_deproject_pixel_to_point); not read here.
            NOTE(review): determine_stage() thresholds (45/20) presumably
            expect millimetres — confirm whether pixel distance suffices.

    Returns:
        float: straight-line pixel distance between the two landmarks.
    """
    dx = upper_lip_coords[0] - lower_lip_coords[0]
    dy = upper_lip_coords[1] - lower_lip_coords[1]
    return float(np.hypot(dx, dy))

def determine_stage(distance):
    """Map a lip-opening distance to a stage label.

    >= 45 -> "Stage I", [20, 45) -> "Stage II", < 20 -> "Stage III".
    """
    if distance < 20:
        return "Stage III"
    if distance < 45:
        return "Stage II"
    return "Stage I"

# Run the development server, reachable from other machines on the network.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)