Update app.py
app.py CHANGED

@@ -1,7 +1,3 @@
-# app.py
-# FINAL VERSION — No OpenCV. Works on Hugging Face Spaces.
-# Dark theme + Glassmorphism + React autoplay preview
-# Just upload this + best_model.pth
 
 import os
 import subprocess
@@ -12,6 +8,7 @@ from PIL import Image
 import gradio as gr
 import tempfile
 import base64
+import numpy as np
 
 SEQUENCE_LENGTH = 16
 NUM_CLASSES = 4
@@ -46,7 +43,7 @@ class CNNLSTM(nn.Module):
 # ------------------ LOAD MODEL ------------------
 def load_model():
     if not os.path.exists(MODEL_PATH):
+        raise FileNotFoundError("Upload best_model.pth to the Space!")
     model = CNNLSTM(NUM_CLASSES).to(device)
     model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
     model.eval()
@@ -57,44 +54,33 @@ try:
 except:
     model = None
 
 # ------------------ FRAME EXTRACTION (FFmpeg) ------------------
 def extract_frames_ffmpeg(video_path):
-    """
-    Extract 16 evenly spaced frames using FFmpeg (preinstalled on Hugging Face Spaces).
-    Returns list[PIL.Image].
-    """
     tmp_dir = tempfile.mkdtemp()
+    out_pattern = os.path.join(tmp_dir, "frame_%03d.jpg")
 
     cmd = [
         "ffmpeg",
         "-i", video_path,
+        "-vf", "fps=1,scale=320:180",
+        out_pattern,
         "-hide_banner",
         "-loglevel", "error"
     ]
     subprocess.run(cmd)
 
+    jpgs = sorted([os.path.join(tmp_dir, f) for f in os.listdir(tmp_dir) if f.endswith(".jpg")])
 
+    if len(jpgs) == 0:
         return None
 
-        idxs = np.linspace(0, len(frames)-1, SEQUENCE_LENGTH).astype(int)
-        frames = [frames[i] for i in idxs]
+    if len(jpgs) >= SEQUENCE_LENGTH:
+        idxs = np.linspace(0, len(jpgs)-1, SEQUENCE_LENGTH).astype(int)
+        jpgs = [jpgs[i] for i in idxs]
     else:
-        frames = (frames * 16)[:16]
+        jpgs = (jpgs * SEQUENCE_LENGTH)[:SEQUENCE_LENGTH]
 
-    pil_frames = [Image.open(f).convert("RGB") for f in frames]
-    return pil_frames
+    return [Image.open(f).convert("RGB") for f in jpgs]
 
 # ------------------ PREDICTION ------------------
 transform = transforms.Compose([
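
An aside on the sampling step above: with "fps=1" the number of extracted JPEGs roughly equals the clip length in seconds, and the np.linspace call thins that down to 16 evenly spaced frames (or pads by repetition when there are fewer). A minimal illustration, where the frame count of 20 is a made-up example and not from this commit:

import numpy as np

# Hypothetical case: FFmpeg produced 20 JPEGs, but the model expects SEQUENCE_LENGTH = 16.
idxs = np.linspace(0, 20 - 1, 16).astype(int)
print(idxs.tolist())  # [0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 19]
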
@@ -102,15 +88,15 @@ transform = transforms.Compose([
     transforms.ToTensor(),
 ])
 
-def run_prediction(frames):
+def do_predict(frames):
     if model is None:
+        return {"Error": "Model not loaded"}
 
     tensors = [transform(f) for f in frames]
+    tensor = torch.stack(tensors).unsqueeze(0).to(device)
 
     with torch.no_grad():
+        out = model(tensor)
 
     probs = torch.softmax(out, dim=1)[0].cpu().numpy()
@@ -118,50 +104,54 @@ def run_prediction(frames):
 
 def predict(files):
     if files is None:
+        return {"Error": "Upload a file first!"}
 
-    # Normalize file list
     if isinstance(files, str):
         files = [files]
 
+    # Video
     if len(files) == 1 and files[0].lower().endswith((".mp4",".mov",".avi",".mkv",".webm")):
         frames = extract_frames_ffmpeg(files[0])
         if frames is None:
+            return {"Error": "FFmpeg could not extract frames!"}
+        return do_predict(frames)
 
+    # Multiple images
+    if len(files) >= SEQUENCE_LENGTH:
+        imgs = [Image.open(f).convert("RGB") for f in files[:16]]
+        return do_predict(imgs)
 
+    # Single image
     try:
         img = Image.open(files[0]).convert("RGB")
+        frames = [img] * SEQUENCE_LENGTH
+        return do_predict(frames)
     except:
+        return {"Error": "Invalid image"}
+
+# ------------------ CSS (insert via HTML) ------------------
+css_html = """
+<style>
+body, .gradio-container {
+    background: #0b0f12 !important;
+    color: white !important;
+}
 .glass {
+    backdrop-filter: blur(12px) saturate(180%);
+    background: rgba(255,255,255,0.06);
+    border-radius: 16px;
+    padding: 20px;
+    border: 1px solid rgba(255,255,255,0.08);
+    box-shadow: 0 4px 40px rgba(0,0,0,0.4);
 }
+</style>
 """
 
+# ------------------ REACT FRONTEND ------------------
 react_html = """
 <div class="glass">
+  <h1 style="margin:0;font-size:28px;">Crowd Behavior Analyzer</h1>
+  <p style="opacity:0.7;">Dark • Glassmorphism • React Autoplay Preview</p>
   <div id="react-root"></div>
 </div>
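
The routing in predict() above means a single video goes through FFmpeg, a batch of 16 or more images is used directly, and a lone image is repeated, so do_predict() always receives exactly SEQUENCE_LENGTH frames. A quick way to sanity-check that routing is to drop a few temporary calls just above demo.launch(); the file names below are placeholders:

# Temporary smoke test (placeholder paths); assumes best_model.pth loaded successfully.
print(predict(["crowd_clip.mp4"]))                         # one video -> FFmpeg frame extraction
print(predict([f"frame_{i:02d}.jpg" for i in range(16)]))  # >= SEQUENCE_LENGTH images
print(predict(["single_shot.jpg"]))                        # one image, repeated 16 times
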
@@ -173,47 +163,41 @@ const e = React.createElement;
 
 function App(){
   const [frames,setFrames] = React.useState([]);
+  const [i,setI] = React.useState(0);
 
   React.useEffect(()=>{
+    const inp = document.getElementById("media_input");
+    if(!inp) return;
+    inp.addEventListener("change",() =>{
+      const files = inp.files;
+      if(!files || !files.length) return;
+
+      const picks = [...files].slice(0,16).map(f => {
+        return new Promise(res=>{
+          const r=new FileReader();
+          r.onload=()=>res(r.result);
+          r.readAsDataURL(f);
         });
       });
 
+      Promise.all(picks).then(data=>{
+        while(data.length < 16) data.push(data[0]);
+        setFrames(data);
+        setI(0);
       });
+    });
   },[]);
 
   React.useEffect(()=>{
+    if(!frames.length) return;
+    const t=setInterval(()=>setI(x=>(x+1)%frames.length),300);
     return ()=>clearInterval(t);
   },[frames]);
 
   return e("div",{},
+    frames.length
+    ? e("img",{src:frames[i],style:{width:"100%",borderRadius:"12px"}})
+    : e("p",{style:{opacity:0.5}},"Preview will appear here after upload.")
   );
 }
@@ -221,21 +205,21 @@ ReactDOM.createRoot(document.getElementById("react-root")).render(e(App));
 </script>
 """
 
+# ------------------ UI ------------------
+with gr.Blocks() as demo:
+    gr.HTML(css_html)
     gr.HTML(react_html)
 
     file_input = gr.File(
+        label="Upload video or multiple images",
         file_count="multiple",
         type="filepath",
         elem_id="media_input"
     )
 
+    btn = gr.Button("Analyze", variant="primary")
     output = gr.Label(num_top_classes=4)
 
+    btn.click(predict, file_input, output)
 
 demo.launch()
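
One deployment note: besides best_model.pth, the Space also needs a requirements.txt covering the third-party imports in this file; torch, torchvision (assuming transforms comes from there), gradio, numpy, and Pillow appear to be the full set, with exact version pins not shown in this commit. FFmpeg itself is preinstalled on Spaces, as the removed docstring noted.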