# app.py
from flask import Flask, render_template_string, request, jsonify
from flask_cors import CORS
import base64
import cv2
import numpy as np
import time
import io
from PIL import Image
app = Flask(__name__)
CORS(app)
# -------------------------
# Simple HTML template served by Flask (one-file)
# -------------------------
HTML = """
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>CareVision - Fall Detection (Frontend)</title>
<style>
body { background:#0f1720; color:#e6eef8; font-family: Arial, Helvetica, sans-serif; margin:0; padding:20px; }
.header { text-align:center; }
h1 { color:#00d1ff; margin-bottom:6px; }
.sub { color:#b8c7d6; margin-bottom:20px; }
.controls { display:flex; gap:12px; margin-bottom:12px; align-items:center; flex-wrap:wrap; }
.box { background:#0b1220; border-radius:8px; padding:12px; box-shadow: 0 2px 8px rgba(0,0,0,0.6); }
#video_orig { width:640px; height:360px; background:black; border-radius:6px; }
#canv_sys { width:640px; height:360px; background:#111; border-radius:6px; }
.cols { display:flex; gap:16px; flex-wrap:wrap; }
.status { font-weight:600; margin-top:8px; }
.alert { color:#ff5c5c; font-weight:700; }
.btn { background:#0ea5e9; border:none; padding:8px 12px; color:#042029; border-radius:6px; cursor:pointer; font-weight:700; }
.btn:active { transform:translateY(1px); }
input[type=file] { color:#fff; }
label.radio { margin-right:10px; cursor:pointer; }
.small { font-size:13px; color:#9fb2c6; }
</style>
</head>
<body>
<div class="header">
<h1>Welcome to CareVision — Fall Detection System</h1>
<div class="sub">Choose input type then press Run. (This frontend simulates detections — model integration explained below.)</div>
</div>
<div class="controls box">
<label class="radio"><input type="radio" name="mode" value="upload" checked> Upload Video</label>
<label class="radio"><input type="radio" name="mode" value="camera"> Real-time Camera</label>
<input id="fileInput" type="file" accept="video/*" />
<button id="startBtn" class="btn">▶ Run</button>
<button id="stopBtn" class="btn" style="background:#ff8a65">⏹ Stop</button>
<button id="ngrokInfo" class="btn" style="background:#ffd166">How to open on phone</button>
<div style="margin-left:20px;" class="small">Status: <span id="statusText">Waiting...</span></div>
</div>
<div class="cols">
<div class="box">
<div style="font-weight:700; margin-bottom:6px">Original (Input)</div>
<video id="video_orig" playsinline muted></video>
<div class="small">This shows uploaded video or webcam stream.</div>
</div>
<div class="box">
<div style="font-weight:700; margin-bottom:6px">System View (Detection)</div>
<canvas id="canv_sys" width="640" height="360"></canvas>
<div class="small">System overlays bounding boxes & status here.</div>
<div class="status" id="detectionStatus">No detection yet.</div>
</div>
</div>
<script>
const modeRadios = document.getElementsByName('mode');
const fileInput = document.getElementById('fileInput');
const videoOrig = document.getElementById('video_orig');
const canvSys = document.getElementById('canv_sys');
const ctx = canvSys.getContext('2d');
const startBtn = document.getElementById('startBtn');
const stopBtn = document.getElementById('stopBtn');
const statusText = document.getElementById('statusText');
const detectionStatus = document.getElementById('detectionStatus');
const ngrokInfo = document.getElementById('ngrokInfo');
let stream = null;
let running = false;
let frameCounter = 0;
ngrokInfo.onclick = () => {
alert("To open on phone with camera access: run ngrok (HTTPS) for your server. See terminal instructions provided in README.");
};
function getSelectedMode() {
for (const r of modeRadios) if (r.checked) return r.value;
return 'upload';
}
// When file selected -> load into video element
fileInput.onchange = (e) => {
const f = e.target.files[0];
if (!f) return;
const url = URL.createObjectURL(f);
videoOrig.srcObject = null; // clear any earlier camera stream (srcObject takes precedence over src)
videoOrig.src = url;
videoOrig.load();
statusText.textContent = "Video loaded";
};
async function start() {
const mode = getSelectedMode();
running = true;
frameCounter = 0;
statusText.textContent = "Running (" + mode + ")";
detectionStatus.textContent = "Monitoring...";
if (mode === 'camera') {
// request webcam
try {
stream = await navigator.mediaDevices.getUserMedia({ video: { width:640, height:360 }, audio: false });
videoOrig.srcObject = stream;
await videoOrig.play();
} catch (err) {
alert("Camera access denied or not available. Try localhost or use HTTPS (ngrok) for phone.");
running = false;
return;
}
} else {
// upload mode - play video
if (!videoOrig.src) {
alert("Please choose a video file first.");
running = false;
return;
}
videoOrig.currentTime = 0;
await videoOrig.play();
}
// loop to capture frames and send for prediction (simulated)
const fpsInterval = 200; // send a frame every 200ms (~5 fps)
const processLoop = async () => {
if (!running) return;
if (videoOrig.paused) {
// keep waiting
setTimeout(processLoop, 200);
return;
}
// draw current frame to an offscreen canvas
const w = canvSys.width, h = canvSys.height;
// draw original frame into detection canvas as background
ctx.drawImage(videoOrig, 0, 0, w, h);
// get base64 image
const dataUrl = canvSys.toDataURL('image/jpeg', 0.6);
// send to server for prediction
try {
const res = await fetch('/predict_frame', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ image: dataUrl })
});
const j = await res.json();
// j = { fall: bool, bbox: [x,y,w,h], message: str }
// draw bbox
if (j && j.bbox) {
ctx.lineWidth = 4;
ctx.strokeStyle = j.fall ? 'red' : 'cyan';
ctx.strokeRect(j.bbox[0], j.bbox[1], j.bbox[2], j.bbox[3]);
}
// display status
if (j && j.fall) {
detectionStatus.innerHTML = '<span class="alert">⚠️ Fall Detected! ' + (j.message||'') + '</span>';
// voice alert using Web Speech API
if ('speechSynthesis' in window) {
const s = new SpeechSynthesisUtterance("Fall detected. Please check on the person immediately.");
window.speechSynthesis.cancel();
window.speechSynthesis.speak(s);
}
} else {
detectionStatus.innerHTML = 'No fall detected';
}
} catch (err) {
console.error("Predict error:", err);
}
setTimeout(processLoop, fpsInterval);
};
processLoop();
}
function stopAll() {
running = false;
statusText.textContent = "Stopped";
detectionStatus.textContent = "Stopped";
// stop camera stream if any
if (stream) {
stream.getTracks().forEach(t=>t.stop());
stream = null;
}
try { videoOrig.pause(); } catch(e){}
}
startBtn.onclick = start;
stopBtn.onclick = stopAll;
// Clean up on page unload
window.addEventListener('beforeunload', () => {
if (stream) stream.getTracks().forEach(t=>t.stop());
});
</script>
</body>
</html>
"""
# -------------------------
# Prediction endpoint (simulate for now)
# Replace this function with real model inference later
# -------------------------
def simulate_prediction_from_image_b64(data_url):
# data_url like "data:image/jpeg;base64,......"
try:
header, b64 = data_url.split(',', 1)
img_bytes = base64.b64decode(b64)
img = Image.open(io.BytesIO(img_bytes)).convert('RGB')
npimg = np.array(img)
h, w, _ = npimg.shape
    except Exception:
        # decoding failed; fall back to a default frame size
        w, h = 640, 360
    # Very simple simulation, driven by the wall clock:
    # a fall is reported during a 10-second window once per minute.
    # This is for demo only; replace with real model inference.
    now = int(time.time())
    fall = (now // 10) % 6 == 0
if fall:
# bounding box in center
bw = int(w * 0.4)
bh = int(h * 0.5)
bx = int((w - bw) / 2)
by = int((h - bh) / 2)
return {'fall': True, 'bbox': [bx, by, bw, bh], 'message': 'Simulated fall (demo)'}
else:
# one example normal bbox (cyan)
bw = int(w * 0.45)
bh = int(h * 0.6)
bx = int((w - bw) / 2)
by = int((h - bh) / 2)
return {'fall': False, 'bbox': [bx, by, bw, bh], 'message': 'No fall'}
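# -------------------------
# Example: real inference hook (not wired in)
# -------------------------
# A minimal sketch of what real inference could look like, using OpenCV's
# built-in HOG person detector as a stand-in model. The function name
# hog_prediction_from_image_b64 and the "wider-than-tall box means a fall"
# heuristic are illustrative assumptions, not the project's actual model;
# swap in your trained fall-detection network here.
_hog = cv2.HOGDescriptor()
_hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

def hog_prediction_from_image_b64(data_url):
    _header, b64 = data_url.split(',', 1)
    img = Image.open(io.BytesIO(base64.b64decode(b64))).convert('RGB')
    frame = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)  # PIL is RGB, OpenCV expects BGR
    rects, _weights = _hog.detectMultiScale(frame, winStride=(8, 8))
    if len(rects) == 0:
        return {'fall': False, 'bbox': None, 'message': 'No person detected'}
    x, y, bw, bh = max(rects, key=lambda r: r[2] * r[3])  # keep the largest detected person
    fall = bw > bh  # assumption: a lying posture yields a wide, short box
    return {'fall': bool(fall),
            'bbox': [int(x), int(y), int(bw), int(bh)],  # cast numpy ints for jsonify
            'message': 'Possible fall (aspect-ratio heuristic)' if fall else 'Person upright'}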
@app.route('/')
def index():
return render_template_string(HTML)
@app.route('/predict_frame', methods=['POST'])
def predict_frame():
data = request.get_json()
if not data or 'image' not in data:
return jsonify({'error': 'no image provided'}), 400
img_b64 = data['image']
    # Real model inference would go here (decode the image bytes and run the
    # network); see hog_prediction_from_image_b64 above for a sketch.
    # For now, simulate:
res = simulate_prediction_from_image_b64(img_b64)
return jsonify(res)
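# Quick way to exercise the endpoint without the browser (sketch; assumes the
# `requests` package is installed, the server is running locally, and a
# test.jpg file exists -- the filename is just an example):
#
#   import base64, requests
#   b64 = base64.b64encode(open('test.jpg', 'rb').read()).decode()
#   r = requests.post('http://localhost:5000/predict_frame',
#                     json={'image': 'data:image/jpeg;base64,' + b64})
#   print(r.json())  # -> {'bbox': [...], 'fall': ..., 'message': ...}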
if __name__ == '__main__':
    # Bind to 0.0.0.0 to make the app reachable from other devices on the same network
app.run(host='0.0.0.0', port=5000, debug=True, use_reloader=False)
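# Note: browsers only allow getUserMedia (webcam) in a secure context, i.e.
# over https:// or on http://localhost. To use the camera mode from a phone,
# tunnel the server through HTTPS first, e.g. (assuming ngrok is installed):
#
#   ngrok http 5000
#
# then open the https:// forwarding URL that ngrok prints.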