Commit
Β·
7e08013
1
Parent(s):
da4db48
version bump agentic analysis added
Browse files- .claude/settings.local.json +11 -0
- .gitignore +4 -1
- README.md +32 -3
- backend/gradio_labanmovementanalysis/__pycache__/notation_engine.cpython-312.pyc +0 -0
- backend/gradio_labanmovementanalysis/__pycache__/pose_estimation.cpython-312.pyc +0 -0
- backend/gradio_labanmovementanalysis/agentic_analysis.py +450 -0
- backend/gradio_labanmovementanalysis/notation_engine.py +150 -9
- demo/app.py +305 -6
- pyproject.toml +1 -1
.claude/settings.local.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"permissions": {
|
| 3 |
+
"allow": [
|
| 4 |
+
"Bash(/Users/csabi/.nvm/versions/node/v18.20.5/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -n \"smooth_metrics\" /Users/csabi/Develop/Laban-Movement-Analysis/backend/gradio_labanmovementanalysis/notation_engine.py)",
|
| 5 |
+
"Bash(/Users/csabi/.nvm/versions/node/v18.20.5/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -n \"_smooth_metrics\" /Users/csabi/Develop/Laban-Movement-Analysis/backend/gradio_labanmovementanalysis/notation_engine.py)",
|
| 6 |
+
"Bash(/Users/csabi/.nvm/versions/node/v18.20.5/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -n \"# Footer\" /Users/csabi/Develop/Laban-Movement-Analysis/demo/app.py)",
|
| 7 |
+
"Bash(find:*)"
|
| 8 |
+
],
|
| 9 |
+
"deny": []
|
| 10 |
+
}
|
| 11 |
+
}
|
.gitignore
CHANGED
|
@@ -327,4 +327,7 @@ tb_logs/
|
|
| 327 |
outputs/
|
| 328 |
.hydra/
|
| 329 |
|
| 330 |
-
.working/
|
|
|
|
|
|
|
|
|
|
|
|
| 327 |
outputs/
|
| 328 |
.hydra/
|
| 329 |
|
| 330 |
+
.working/
|
| 331 |
+
|
| 332 |
+
# Claude Code project instructions
|
| 333 |
+
CLAUDE.md
|
README.md
CHANGED
|
@@ -20,14 +20,43 @@ tags:
|
|
| 20 |
- mediapipe
|
| 21 |
- yolo
|
| 22 |
- gradio
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
| 24 |
license: apache-2.0
|
| 25 |
---
|
| 26 |
|
| 27 |
-
#
|
| 28 |
<a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
|
| 29 |
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
## Installation
|
| 33 |
|
|
|
|
| 20 |
- mediapipe
|
| 21 |
- yolo
|
| 22 |
- gradio
|
| 23 |
+
- agentic-analysis
|
| 24 |
+
- overlay-video
|
| 25 |
+
- temporal-patterns
|
| 26 |
+
short_description: Professional movement analysis with 15 pose models, AI-powered insights, and interactive visualization
|
| 27 |
license: apache-2.0
|
| 28 |
---
|
| 29 |
|
| 30 |
+
# 🩰 Laban Movement Analysis
|
| 31 |
<a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
|
| 32 |
|
| 33 |
+
**Advanced video movement analysis platform** combining Laban Movement Analysis (LMA) principles with modern AI pose estimation, intelligent analysis, and interactive visualization.
|
| 34 |
+
|
| 35 |
+
## π Key Features
|
| 36 |
+
|
| 37 |
+
### π **Multi-Model Pose Estimation**
|
| 38 |
+
- **15 different pose estimation models** from multiple sources:
|
| 39 |
+
- **MediaPipe**: `mediapipe-lite`, `mediapipe-full`, `mediapipe-heavy`
|
| 40 |
+
- **MoveNet**: `movenet-lightning`, `movenet-thunder`
|
| 41 |
+
- **YOLO v8**: `yolo-v8-n/s/m/l/x` (5 variants)
|
| 42 |
+
- **YOLO v11**: `yolo-v11-n/s/m/l/x` (5 variants)
|
| 43 |
+
|
| 44 |
+
### π₯ **Comprehensive Video Processing**
|
| 45 |
+
- **JSON Analysis Output**: Detailed movement metrics with temporal data
|
| 46 |
+
- **Annotated Video Generation**: Pose overlay with Laban movement data
|
| 47 |
+
- **URL Support**: Direct processing from YouTube, Vimeo, and video URLs
|
| 48 |
+
- **Custom Overlay Component**: `gradio_overlay_video` for controlled layered visualization
|
| 49 |
+
|
| 50 |
+
### π€ **Agentic Intelligence**
|
| 51 |
+
- **SUMMARY Analysis**: Narrative movement interpretation with temporal patterns
|
| 52 |
+
- **STRUCTURED Analysis**: Quantitative breakdowns and statistical insights
|
| 53 |
+
- **MOVEMENT FILTERS**: Pattern detection with intelligent filtering
|
| 54 |
+
- **Laban Interpretation**: Professional movement quality assessment
|
| 55 |
+
|
| 56 |
+
### π¨ **Interactive Visualization**
|
| 57 |
+
- **Standard Analysis Tab**: Core pose estimation and LMA processing
|
| 58 |
+
- **Overlay Visualization Tab**: Interactive layered video display
|
| 59 |
+
- **Agentic Analysis Tab**: AI-powered movement insights and filtering
|
| 60 |
|
| 61 |
## Installation
|
| 62 |
|
backend/gradio_labanmovementanalysis/__pycache__/notation_engine.cpython-312.pyc
CHANGED
|
Binary files a/backend/gradio_labanmovementanalysis/__pycache__/notation_engine.cpython-312.pyc and b/backend/gradio_labanmovementanalysis/__pycache__/notation_engine.cpython-312.pyc differ
|
|
|
backend/gradio_labanmovementanalysis/__pycache__/pose_estimation.cpython-312.pyc
CHANGED
|
Binary files a/backend/gradio_labanmovementanalysis/__pycache__/pose_estimation.cpython-312.pyc and b/backend/gradio_labanmovementanalysis/__pycache__/pose_estimation.cpython-312.pyc differ
|
|
|
backend/gradio_labanmovementanalysis/agentic_analysis.py
ADDED
|
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agentic Analysis Module for Laban Movement Analysis
|
| 3 |
+
|
| 4 |
+
This module provides intelligent analysis capabilities that go beyond raw pose detection
|
| 5 |
+
to offer meaningful insights about movement patterns, quality, and characteristics.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from collections import Counter
|
| 10 |
+
from typing import Dict, List, Any, Optional
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def generate_agentic_analysis(
    json_data: Dict[str, Any],
    analysis_type: str,
    filter_direction: str = "any",
    filter_intensity: str = "any",
    filter_min_fluidity: float = 0.0,
    filter_min_expansion: float = 0.0
) -> Dict[str, Any]:
    """
    Dispatch intelligent movement analysis based on the selected type.

    Args:
        json_data: Movement analysis data from pose estimation.
        analysis_type: One of "summary", "structured", or "movement_filters".
        filter_direction: Direction filter (movement_filters only).
        filter_intensity: Intensity filter (movement_filters only).
        filter_min_fluidity: Minimum fluidity threshold (movement_filters only).
        filter_min_expansion: Minimum expansion threshold (movement_filters only).

    Returns:
        Dictionary with the analysis results, or an "error" dictionary when
        the input is missing, malformed, or the analysis raises.
    """
    # Upstream errors (or no data at all) short-circuit immediately.
    if not json_data or "error" in json_data:
        return {"error": "No valid analysis data available. Please run Standard Analysis first."}

    try:
        # The payload must carry the per-frame movement data.
        if "movement_analysis" not in json_data or "frames" not in json_data["movement_analysis"]:
            return {"error": "Invalid analysis format - missing movement data"}

        frames = json_data["movement_analysis"]["frames"]
        if not frames:
            return {"error": "No movement data found in analysis"}

        video_info = json_data.get("video_info", {})
        model_info = json_data.get("analysis_metadata", {}).get("model_info", {})

        # Route to the requested analysis flavor.
        if analysis_type == "summary":
            return generate_summary_analysis(frames, video_info, model_info)
        if analysis_type == "structured":
            return generate_structured_analysis(frames, video_info, model_info)
        if analysis_type == "movement_filters":
            return generate_movement_filter_analysis(
                frames, video_info, model_info,
                filter_direction, filter_intensity,
                filter_min_fluidity, filter_min_expansion,
            )
        return {"error": "Unknown analysis type"}

    except Exception as e:
        # Surface any unexpected failure as a structured error payload.
        return {"error": f"Analysis failed: {str(e)}"}
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def generate_summary_analysis(frames: List[Dict], video_info: Dict, model_info: Dict) -> Dict[str, Any]:
    """
    Generate a comprehensive movement summary with temporal patterns.

    Args:
        frames: List of per-frame analysis dicts, each carrying a "metrics" sub-dict.
        video_info: Video metadata; "duration_seconds" and "fps" are used when present.
        model_info: Model information; "name" is reported when present.

    Returns:
        Dictionary with dominant characteristics, temporal patterns, movement
        quality metrics, a narrative summary, and a Laban interpretation — or
        an "error" dict when no frames are supplied.
    """
    # Guard: every ratio statistic below divides by len(frames).
    if not frames:
        return {"error": "No movement data found in analysis"}

    # Extract per-frame metric series (missing keys fall back to neutral defaults)
    directions = [f.get("metrics", {}).get("direction", "stationary") for f in frames]
    intensities = [f.get("metrics", {}).get("intensity", "low") for f in frames]
    speeds = [f.get("metrics", {}).get("speed", "slow") for f in frames]
    velocities = [f.get("metrics", {}).get("velocity", 0) for f in frames]
    fluidities = [f.get("metrics", {}).get("fluidity", 0) for f in frames]
    expansions = [f.get("metrics", {}).get("expansion", 0) for f in frames]

    # Dominant categorical characteristics (mode of each series)
    dominant_direction = Counter(directions).most_common(1)[0][0]
    dominant_intensity = Counter(intensities).most_common(1)[0][0]
    dominant_speed = Counter(speeds).most_common(1)[0][0]

    # Temporal patterns: count frame-to-frame transitions
    direction_changes = sum(1 for i in range(1, len(directions)) if directions[i] != directions[i - 1])
    intensity_changes = sum(1 for i in range(1, len(intensities)) if intensities[i] != intensities[i - 1])

    # Statistical aggregates (frames is non-empty, so these are well-defined)
    avg_velocity = np.mean(velocities)
    max_velocity = np.max(velocities)
    avg_fluidity = np.mean(fluidities)
    avg_expansion = np.mean(expansions)

    # Movement complexity: weighted blend of transition rates and fluidity
    complexity_score = (direction_changes / len(frames)) * 0.4 + (intensity_changes / len(frames)) * 0.3 + (avg_fluidity * 0.3)

    # Duration: prefer explicit metadata; otherwise derive from frame count.
    # A missing or zero fps falls back to 25 to avoid division by zero.
    fps = video_info.get("fps") or 25
    duration = video_info.get("duration_seconds", len(frames) / fps)

    return {
        "analysis_type": "Movement Summary",
        "model_used": model_info.get("name", "unknown"),
        "video_duration": f"{duration:.1f} seconds",
        "total_frames": len(frames),

        "dominant_characteristics": {
            "primary_direction": dominant_direction,
            "primary_intensity": dominant_intensity,
            "primary_speed": dominant_speed
        },

        "temporal_patterns": {
            "direction_transitions": direction_changes,
            "intensity_variations": intensity_changes,
            "movement_consistency": f"{(1 - direction_changes/len(frames))*100:.1f}%",
            "complexity_score": f"{complexity_score:.3f}"
        },

        "movement_quality": {
            "average_fluidity": f"{avg_fluidity:.3f}",
            "average_expansion": f"{avg_expansion:.3f}",
            "peak_velocity": f"{max_velocity:.3f}",
            "average_velocity": f"{avg_velocity:.3f}"
        },

        "narrative_summary": f"This {duration:.1f}-second movement sequence shows predominantly {dominant_direction} movement with {dominant_intensity} intensity. "
                             f"The performer demonstrates {direction_changes} directional changes and {intensity_changes} intensity variations, "
                             f"indicating a {'complex' if complexity_score > 0.3 else 'simple'} movement pattern. "
                             f"Movement quality shows {avg_fluidity:.2f} fluidity and {avg_expansion:.2f} spatial expansion, "
                             f"suggesting {'expressive' if avg_expansion > 0.5 else 'contained'} movement vocabulary.",

        "laban_interpretation": {
            "effort_qualities": f"Primary effort: {dominant_intensity} intensity with {dominant_speed} timing",
            "space_usage": f"{'Expansive' if avg_expansion > 0.5 else 'Contracted'} spatial patterns",
            "flow_quality": f"{'Bound' if avg_fluidity < 0.3 else 'Free' if avg_fluidity > 0.7 else 'Balanced'} flow"
        }
    }
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def generate_structured_analysis(frames: List[Dict], video_info: Dict, model_info: Dict) -> Dict[str, Any]:
    """
    Generate detailed structured analysis with metrics breakdown.

    Args:
        frames: List of per-frame analysis dicts, each carrying a "metrics" sub-dict.
        video_info: Video metadata; duration/fps/resolution fields are reported.
        model_info: Model information; "name" is reported when present.

    Returns:
        Dictionary containing detailed quantitative analysis, or an "error"
        dict when no frames are supplied.
    """
    # Guard: percentages and segment arithmetic divide by len(frames).
    if not frames:
        return {"error": "No movement data found in analysis"}

    # Extract all per-frame metric series (with safe defaults)
    directions = [f.get("metrics", {}).get("direction", "stationary") for f in frames]
    intensities = [f.get("metrics", {}).get("intensity", "low") for f in frames]
    speeds = [f.get("metrics", {}).get("speed", "slow") for f in frames]
    velocities = [f.get("metrics", {}).get("velocity", 0) for f in frames]
    accelerations = [f.get("metrics", {}).get("acceleration", 0) for f in frames]
    fluidities = [f.get("metrics", {}).get("fluidity", 0) for f in frames]
    expansions = [f.get("metrics", {}).get("expansion", 0) for f in frames]

    # fps for time-range strings; a missing or zero fps falls back to 25
    # to avoid division by zero on videos without metadata.
    fps = video_info.get("fps") or 25

    # Categorical distributions
    direction_stats = Counter(directions)
    direction_percentages = {k: (v / len(frames)) * 100 for k, v in direction_stats.items()}

    intensity_stats = Counter(intensities)
    intensity_percentages = {k: (v / len(frames)) * 100 for k, v in intensity_stats.items()}

    speed_stats = Counter(speeds)
    speed_percentages = {k: (v / len(frames)) * 100 for k, v in speed_stats.items()}

    # Temporal segmentation: split the clip into up to 8 segments
    segment_size = max(1, len(frames) // 8)
    segments = []
    for i in range(0, len(frames), segment_size):
        segment_frames = frames[i:i + segment_size]
        if segment_frames:
            seg_directions = [f.get("metrics", {}).get("direction", "stationary") for f in segment_frames]
            seg_intensities = [f.get("metrics", {}).get("intensity", "low") for f in segment_frames]
            seg_fluidities = [f.get("metrics", {}).get("fluidity", 0) for f in segment_frames]

            segments.append({
                "segment_index": len(segments) + 1,
                "time_range": f"{i/fps:.1f}-{(i+len(segment_frames))/fps:.1f}s",
                "dominant_direction": Counter(seg_directions).most_common(1)[0][0],
                "dominant_intensity": Counter(seg_intensities).most_common(1)[0][0],
                # float() keeps the value JSON-serializable (np.float64 is not)
                "average_fluidity": float(np.mean(seg_fluidities)) if seg_fluidities else 0
            })

    return {
        "analysis_type": "Structured Analysis",
        "model_used": model_info.get("name", "unknown"),
        "video_info": {
            "duration": video_info.get("duration_seconds", 0),
            "fps": video_info.get("fps", 0),
            "resolution": f"{video_info.get('width', 0)}x{video_info.get('height', 0)}",
            "total_frames": len(frames)
        },

        "direction_breakdown": {
            "statistics": dict(direction_stats),
            "percentages": {k: f"{v:.1f}%" for k, v in direction_percentages.items()},
            "most_common": direction_stats.most_common(3)
        },

        "intensity_breakdown": {
            "statistics": dict(intensity_stats),
            "percentages": {k: f"{v:.1f}%" for k, v in intensity_percentages.items()},
            "distribution": intensity_stats.most_common()
        },

        "speed_breakdown": {
            "statistics": dict(speed_stats),
            "percentages": {k: f"{v:.1f}%" for k, v in speed_percentages.items()},
            "distribution": speed_stats.most_common()
        },

        "quantitative_metrics": {
            "velocity": {
                "mean": f"{np.mean(velocities):.4f}",
                "std": f"{np.std(velocities):.4f}",
                "min": f"{np.min(velocities):.4f}",
                "max": f"{np.max(velocities):.4f}"
            },
            "acceleration": {
                "mean": f"{np.mean(accelerations):.4f}",
                "std": f"{np.std(accelerations):.4f}",
                "min": f"{np.min(accelerations):.4f}",
                "max": f"{np.max(accelerations):.4f}"
            },
            "fluidity": {
                "mean": f"{np.mean(fluidities):.4f}",
                "std": f"{np.std(fluidities):.4f}",
                "min": f"{np.min(fluidities):.4f}",
                "max": f"{np.max(fluidities):.4f}"
            },
            "expansion": {
                "mean": f"{np.mean(expansions):.4f}",
                "std": f"{np.std(expansions):.4f}",
                "min": f"{np.min(expansions):.4f}",
                "max": f"{np.max(expansions):.4f}"
            }
        },

        "temporal_segments": segments,

        "movement_patterns": {
            "consistency_index": f"{1 - (len(set(directions))/len(frames)):.3f}",
            "dynamic_range": f"{np.max(velocities) - np.min(velocities):.4f}",
            "complexity_score": f"{len(set(directions)) * len(set(intensities)) / len(frames):.3f}"
        }
    }
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def generate_movement_filter_analysis(
    frames: List[Dict],
    video_info: Dict,
    model_info: Dict,
    filter_direction: str,
    filter_intensity: str,
    filter_min_fluidity: float,
    filter_min_expansion: float
) -> Dict[str, Any]:
    """
    Generate movement filter analysis with pattern detection.

    Args:
        frames: List of per-frame analysis dicts, each carrying a "metrics" sub-dict.
        video_info: Video metadata; "fps" is used for duration strings.
        model_info: Model information; "name" is reported when present.
        filter_direction: Direction to match, or "any" to disable.
        filter_intensity: Intensity to match, or "any" to disable.
        filter_min_fluidity: Minimum fluidity threshold (always applied).
        filter_min_expansion: Minimum expansion threshold (always applied).

    Returns:
        Dictionary with the filter criteria, matched-pattern statistics, and
        a textual recommendation.
    """
    # Apply all four filters; a frame must satisfy every active criterion.
    matching_frames = []
    total_frames = len(frames)

    for frame in frames:
        metrics = frame.get("metrics", {})
        direction = metrics.get("direction", "stationary")
        intensity = metrics.get("intensity", "low")
        fluidity = metrics.get("fluidity", 0)
        expansion = metrics.get("expansion", 0)

        # "any" disables the categorical filters; thresholds always apply
        direction_match = filter_direction == "any" or direction == filter_direction
        intensity_match = filter_intensity == "any" or intensity == filter_intensity
        fluidity_match = fluidity >= filter_min_fluidity
        expansion_match = expansion >= filter_min_expansion

        if direction_match and intensity_match and fluidity_match and expansion_match:
            matching_frames.append(frame)

    match_percentage = (len(matching_frames) / total_frames) * 100 if total_frames > 0 else 0

    # fps for duration strings; a missing or zero fps falls back to 25
    # to avoid division by zero on videos without metadata.
    fps = video_info.get("fps") or 25

    # Pattern detection within the matching frames
    if matching_frames:
        match_velocities = [f.get("metrics", {}).get("velocity", 0) for f in matching_frames]

        # Find runs of consecutive frame indices (length >= 2) among matches
        frame_indices = [f.get("frame_index", 0) for f in matching_frames]
        sequences = []
        current_seq = [frame_indices[0]]
        for i in range(1, len(frame_indices)):
            if frame_indices[i] == frame_indices[i - 1] + 1:
                current_seq.append(frame_indices[i])
            else:
                if len(current_seq) > 1:
                    sequences.append(current_seq)
                current_seq = [frame_indices[i]]
        if len(current_seq) > 1:
            sequences.append(current_seq)

        longest_sequence = max(sequences, key=len) if sequences else []

        pattern_analysis = {
            "total_matching_frames": len(matching_frames),
            "match_percentage": f"{match_percentage:.1f}%",
            "continuous_sequences": len(sequences),
            "longest_sequence": {
                "length": len(longest_sequence),
                "start_frame": longest_sequence[0] if longest_sequence else 0,
                "end_frame": longest_sequence[-1] if longest_sequence else 0,
                "duration": f"{len(longest_sequence) / fps:.2f}s" if longest_sequence else "0s"
            },
            "velocity_in_matches": {
                "mean": f"{np.mean(match_velocities):.4f}" if match_velocities else "0",
                "peak": f"{np.max(match_velocities):.4f}" if match_velocities else "0"
            }
        }
    else:
        pattern_analysis = {
            "total_matching_frames": 0,
            "match_percentage": "0%",
            "message": "No frames match the specified criteria"
        }

    return {
        "analysis_type": "Movement Filter Analysis",
        "model_used": model_info.get("name", "unknown"),
        "filter_criteria": {
            "direction": filter_direction,
            "intensity": filter_intensity,
            "min_fluidity": filter_min_fluidity,
            "min_expansion": filter_min_expansion
        },
        "results": pattern_analysis,
        "recommendations": generate_filter_recommendations(matching_frames, total_frames, filter_direction, filter_intensity)
    }
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def generate_filter_recommendations(matching_frames: List[Dict], total_frames: int, filter_direction: str, filter_intensity: str) -> str:
    """
    Summarize how strongly the movement matched the filter criteria.

    Args:
        matching_frames: Frames that satisfied the filter criteria.
        total_frames: Total number of frames analyzed.
        filter_direction: The direction filter that was applied.
        filter_intensity: The intensity filter that was applied.

    Returns:
        A human-readable recommendation string describing pattern strength.
    """
    # Fraction of the clip that matched (0 when there were no frames at all).
    ratio = len(matching_frames) / total_frames if total_frames > 0 else 0
    pct = f"{ratio * 100:.1f}"

    # Tiered wording by match strength, strongest first.
    if ratio > 0.7:
        return (f"Strong pattern detected: {pct}% of movement matches your criteria. "
                f"This suggests consistent {filter_direction} movement with {filter_intensity} intensity.")
    if ratio > 0.3:
        return (f"Moderate pattern: {pct}% match suggests intermittent {filter_direction} "
                f"movement patterns. Consider analyzing temporal distribution.")
    if ratio > 0.1:
        return (f"Weak pattern: Only {pct}% match. "
                f"The movement may be more varied than your filter criteria suggest.")
    return (f"No significant pattern found ({pct}% match). "
            f"Consider broadening filter criteria or analyzing different movement qualities.")
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
def process_standard_for_agent(json_data: Dict[str, Any], output_format: str = "summary") -> Dict[str, Any]:
    """
    Convert standard analysis JSON into a compact agent-friendly form.

    Args:
        json_data: Standard movement analysis data.
        output_format: Desired output format ("summary", "structured", "json").

    Returns:
        The converted analysis, the input itself for "json" output or
        error/empty input, or an error dictionary on failure.
    """
    # Empty input or an upstream error payload is passed straight through.
    if not json_data or "error" in json_data:
        return json_data

    try:
        # Require the nested per-frame movement data.
        if "movement_analysis" not in json_data or "frames" not in json_data["movement_analysis"]:
            return {"error": "Invalid analysis format"}

        frames = json_data["movement_analysis"]["frames"]
        if not frames:
            return {"error": "No movement data found"}

        # Pull one metric series per key, with a neutral default.
        def series(key, default):
            return [f.get("metrics", {}).get(key, default) for f in frames]

        # Mode of a categorical series.
        def mode(values):
            return Counter(values).most_common(1)[0][0]

        fluidities = series("fluidity", 0.0)
        expansions = series("expansion", 0.5)

        dominant_direction = mode(series("direction", "stationary"))
        dominant_intensity = mode(series("intensity", "low"))
        dominant_speed = mode(series("speed", "slow"))
        avg_fluidity = sum(fluidities) / len(fluidities) if fluidities else 0.0
        avg_expansion = sum(expansions) / len(expansions) if expansions else 0.5

        if output_format == "summary":
            return {
                "summary": (
                    f"Movement Analysis: Predominantly {dominant_direction} direction "
                    f"with {dominant_intensity} intensity. "
                    f"Speed: {dominant_speed}. Fluidity: {avg_fluidity:.2f}, Expansion: {avg_expansion:.2f}"
                )
            }
        if output_format == "structured":
            return {
                "success": True,
                "direction": dominant_direction,
                "intensity": dominant_intensity,
                "speed": dominant_speed,
                "fluidity": avg_fluidity,
                "expansion": avg_expansion,
                "segments": len(frames)
            }
        # Any other format ("json") returns the raw input unchanged.
        return json_data

    except Exception as e:
        return {"error": f"Conversion failed: {str(e)}"}
|
backend/gradio_labanmovementanalysis/notation_engine.py
CHANGED
|
@@ -60,27 +60,104 @@ class MovementMetrics:
|
|
| 60 |
total_displacement: float = 0.0
|
| 61 |
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
class MovementAnalyzer:
|
| 64 |
"""Analyzes pose sequences to extract LMA-style movement metrics."""
|
| 65 |
|
| 66 |
def __init__(self, fps: float = 30.0,
|
| 67 |
-
velocity_threshold_slow: float = 0.01,
|
| 68 |
-
velocity_threshold_fast: float = 0.1,
|
| 69 |
-
intensity_accel_threshold: float = 0.05
|
|
|
|
|
|
|
| 70 |
"""
|
| 71 |
-
Initialize movement analyzer.
|
| 72 |
|
| 73 |
Args:
|
| 74 |
fps: Frames per second of the video
|
| 75 |
-
velocity_threshold_slow:
|
| 76 |
-
velocity_threshold_fast:
|
| 77 |
-
intensity_accel_threshold:
|
|
|
|
|
|
|
| 78 |
"""
|
| 79 |
self.fps = fps
|
| 80 |
self.frame_duration = 1.0 / fps if fps > 0 else 0.0
|
| 81 |
self.velocity_threshold_slow = velocity_threshold_slow
|
| 82 |
self.velocity_threshold_fast = velocity_threshold_fast
|
| 83 |
self.intensity_accel_threshold = intensity_accel_threshold
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
|
| 85 |
def analyze_movement(self, pose_sequence: List[List[PoseResult]]) -> List[MovementMetrics]:
|
| 86 |
"""
|
|
@@ -126,8 +203,14 @@ class MovementAnalyzer:
|
|
| 126 |
prev_velocity = None # Or prev_velocity.pop(person_id, None)
|
| 127 |
continue
|
| 128 |
|
| 129 |
-
# Compute body center
|
| 130 |
-
center = self.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
|
| 132 |
# Initialize metrics for this frame
|
| 133 |
frame_metrics = MovementMetrics(
|
|
@@ -170,9 +253,43 @@ class MovementAnalyzer:
|
|
| 170 |
prev_centers = center
|
| 171 |
prev_velocity = frame_metrics.velocity
|
| 172 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
metrics = self._smooth_metrics(metrics)
|
| 174 |
return metrics
|
| 175 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 176 |
def _compute_body_center(self, keypoints: List[Keypoint]) -> Tuple[float, float]:
|
| 177 |
"""Compute the center of mass of the body."""
|
| 178 |
major_joints = ["left_hip", "right_hip", "left_shoulder", "right_shoulder"]
|
|
@@ -278,6 +395,30 @@ class MovementAnalyzer:
|
|
| 278 |
|
| 279 |
return 0.5 # Default neutral expansion if no valid pairs
|
| 280 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 281 |
def _smooth_metrics(self, metrics_list: List[MovementMetrics]) -> List[MovementMetrics]:
|
| 282 |
"""Apply smoothing to reduce noise in metrics using a simple moving average."""
|
| 283 |
window_size = 3
|
|
|
|
| 60 |
total_displacement: float = 0.0
|
| 61 |
|
| 62 |
|
| 63 |
+
class SimpleKalmanFilter:
    """Lightweight Kalman filter for position/velocity tracking.

    State vector is ``[x, y, vx, vy]`` under a constant-velocity motion
    model; only the (x, y) position is observed.
    """

    def __init__(self, process_noise: float = 0.01, measurement_noise: float = 0.1):
        """
        Args:
            process_noise: Variance of the process (model) noise.
            measurement_noise: Variance of the position measurements.
        """
        self.process_noise = process_noise
        self.measurement_noise = measurement_noise
        self.is_initialized = False

        # State: [x, y, vx, vy]
        self.state = np.zeros(4)
        self.covariance = np.eye(4) * 0.1

        # Transition matrix (constant velocity model).
        # BUGFIX: must be a float array — predict() writes the fractional
        # frame duration into F[0, 2] / F[1, 3]; with the previously
        # inferred integer dtype, a dt < 1 was silently truncated to 0,
        # so velocity was never integrated into position.
        self.F = np.array([[1, 0, 1, 0],
                           [0, 1, 0, 1],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]], dtype=float)

        # Measurement matrix (observe position only)
        self.H = np.array([[1, 0, 0, 0],
                           [0, 1, 0, 0]], dtype=float)

        # Process noise covariance
        self.Q = np.eye(4) * process_noise

        # Measurement noise covariance
        self.R = np.eye(2) * measurement_noise

    def predict(self, dt: float = 1.0):
        """Advance the state estimate by one time step of length ``dt``."""
        # Update transition matrix with the (possibly fractional) time step
        self.F[0, 2] = dt
        self.F[1, 3] = dt

        # Predict state and grow the covariance by the process noise
        self.state = self.F @ self.state
        self.covariance = self.F @ self.covariance @ self.F.T + self.Q

    def update(self, measurement: Tuple[float, float]):
        """Fuse a position measurement ``(x, y)`` into the state estimate."""
        z = np.array([measurement[0], measurement[1]], dtype=float)

        # First measurement only seeds the position; velocity stays zero.
        if not self.is_initialized:
            self.state[:2] = z
            self.is_initialized = True
            return

        # Calculate Kalman gain
        S = self.H @ self.covariance @ self.H.T + self.R
        K = self.covariance @ self.H.T @ np.linalg.inv(S)

        # Correct the state with the innovation (measurement residual)
        y = z - self.H @ self.state
        self.state = self.state + K @ y
        self.covariance = (np.eye(4) - K @ self.H) @ self.covariance

    def get_position(self) -> Tuple[float, float]:
        """Get filtered position."""
        return (self.state[0], self.state[1])

    def get_velocity(self) -> Tuple[float, float]:
        """Get filtered velocity."""
        return (self.state[2], self.state[3])
|
| 126 |
+
|
| 127 |
+
|
| 128 |
class MovementAnalyzer:
|
| 129 |
"""Analyzes pose sequences to extract LMA-style movement metrics."""
|
| 130 |
|
| 131 |
def __init__(self, fps: float = 30.0,
|
| 132 |
+
velocity_threshold_slow: float = 0.01,
|
| 133 |
+
velocity_threshold_fast: float = 0.1,
|
| 134 |
+
intensity_accel_threshold: float = 0.05,
|
| 135 |
+
use_kalman_filter: bool = True,
|
| 136 |
+
use_adaptive_thresholds: bool = True):
|
| 137 |
"""
|
| 138 |
+
Initialize movement analyzer with advanced features.
|
| 139 |
|
| 140 |
Args:
|
| 141 |
fps: Frames per second of the video
|
| 142 |
+
velocity_threshold_slow: Initial threshold for slow movement
|
| 143 |
+
velocity_threshold_fast: Initial threshold for fast movement
|
| 144 |
+
intensity_accel_threshold: Initial acceleration threshold for intensity
|
| 145 |
+
use_kalman_filter: Whether to use Kalman filtering for tracking
|
| 146 |
+
use_adaptive_thresholds: Whether to adapt thresholds based on video content
|
| 147 |
"""
|
| 148 |
self.fps = fps
|
| 149 |
self.frame_duration = 1.0 / fps if fps > 0 else 0.0
|
| 150 |
self.velocity_threshold_slow = velocity_threshold_slow
|
| 151 |
self.velocity_threshold_fast = velocity_threshold_fast
|
| 152 |
self.intensity_accel_threshold = intensity_accel_threshold
|
| 153 |
+
self.use_kalman_filter = use_kalman_filter
|
| 154 |
+
self.use_adaptive_thresholds = use_adaptive_thresholds
|
| 155 |
+
|
| 156 |
+
# Kalman filter for tracking
|
| 157 |
+
self.kalman_filter = SimpleKalmanFilter() if use_kalman_filter else None
|
| 158 |
+
|
| 159 |
+
# Adaptive threshold parameters
|
| 160 |
+
self.adaptive_thresholds_computed = False
|
| 161 |
|
| 162 |
def analyze_movement(self, pose_sequence: List[List[PoseResult]]) -> List[MovementMetrics]:
|
| 163 |
"""
|
|
|
|
| 203 |
prev_velocity = None # Or prev_velocity.pop(person_id, None)
|
| 204 |
continue
|
| 205 |
|
| 206 |
+
# Compute confidence-weighted body center
|
| 207 |
+
center = self._compute_body_center_weighted(pose.keypoints)
|
| 208 |
+
|
| 209 |
+
# Apply Kalman filtering if enabled
|
| 210 |
+
if self.kalman_filter:
|
| 211 |
+
self.kalman_filter.predict(self.frame_duration)
|
| 212 |
+
self.kalman_filter.update(center)
|
| 213 |
+
center = self.kalman_filter.get_position()
|
| 214 |
|
| 215 |
# Initialize metrics for this frame
|
| 216 |
frame_metrics = MovementMetrics(
|
|
|
|
| 253 |
prev_centers = center
|
| 254 |
prev_velocity = frame_metrics.velocity
|
| 255 |
|
| 256 |
+
# Apply adaptive thresholds if enabled
|
| 257 |
+
if self.use_adaptive_thresholds and not self.adaptive_thresholds_computed:
|
| 258 |
+
self._compute_adaptive_thresholds(metrics)
|
| 259 |
+
# Recompute speed and intensity with new thresholds
|
| 260 |
+
metrics = self._recompute_with_adaptive_thresholds(metrics)
|
| 261 |
+
|
| 262 |
metrics = self._smooth_metrics(metrics)
|
| 263 |
return metrics
|
| 264 |
|
| 265 |
+
def _compute_body_center_weighted(self, keypoints: List[Keypoint]) -> Tuple[float, float]:
|
| 266 |
+
"""Compute confidence-weighted center of mass of the body."""
|
| 267 |
+
major_joints = ["left_hip", "right_hip", "left_shoulder", "right_shoulder"]
|
| 268 |
+
|
| 269 |
+
weighted_x = 0.0
|
| 270 |
+
weighted_y = 0.0
|
| 271 |
+
total_weight = 0.0
|
| 272 |
+
|
| 273 |
+
for kp in keypoints:
|
| 274 |
+
if kp.name in major_joints and kp.confidence > 0.5:
|
| 275 |
+
weight = kp.confidence
|
| 276 |
+
weighted_x += kp.x * weight
|
| 277 |
+
weighted_y += kp.y * weight
|
| 278 |
+
total_weight += weight
|
| 279 |
+
|
| 280 |
+
# Fallback to all keypoints if no major joints found
|
| 281 |
+
if total_weight == 0:
|
| 282 |
+
for kp in keypoints:
|
| 283 |
+
if kp.confidence > 0.3:
|
| 284 |
+
weight = kp.confidence
|
| 285 |
+
weighted_x += kp.x * weight
|
| 286 |
+
weighted_y += kp.y * weight
|
| 287 |
+
total_weight += weight
|
| 288 |
+
|
| 289 |
+
if total_weight > 0:
|
| 290 |
+
return (weighted_x / total_weight, weighted_y / total_weight)
|
| 291 |
+
return (0.5, 0.5)
|
| 292 |
+
|
| 293 |
def _compute_body_center(self, keypoints: List[Keypoint]) -> Tuple[float, float]:
|
| 294 |
"""Compute the center of mass of the body."""
|
| 295 |
major_joints = ["left_hip", "right_hip", "left_shoulder", "right_shoulder"]
|
|
|
|
| 395 |
|
| 396 |
return 0.5 # Default neutral expansion if no valid pairs
|
| 397 |
|
| 398 |
+
def _compute_adaptive_thresholds(self, metrics: List[MovementMetrics]):
|
| 399 |
+
"""Compute adaptive thresholds based on movement characteristics in the video."""
|
| 400 |
+
velocities = [m.velocity for m in metrics if m.velocity > 0]
|
| 401 |
+
accelerations = [abs(m.acceleration) for m in metrics if m.acceleration != 0]
|
| 402 |
+
|
| 403 |
+
if velocities:
|
| 404 |
+
velocity_percentiles = np.percentile(velocities, [25, 75])
|
| 405 |
+
self.velocity_threshold_slow = max(velocity_percentiles[0], 0.005)
|
| 406 |
+
self.velocity_threshold_fast = velocity_percentiles[1]
|
| 407 |
+
|
| 408 |
+
if accelerations:
|
| 409 |
+
accel_75th = np.percentile(accelerations, 75)
|
| 410 |
+
self.intensity_accel_threshold = max(accel_75th * 0.5, 0.01)
|
| 411 |
+
|
| 412 |
+
self.adaptive_thresholds_computed = True
|
| 413 |
+
|
| 414 |
+
def _recompute_with_adaptive_thresholds(self, metrics: List[MovementMetrics]) -> List[MovementMetrics]:
|
| 415 |
+
"""Recompute speed and intensity classifications with adaptive thresholds."""
|
| 416 |
+
for metric in metrics:
|
| 417 |
+
if metric.velocity > 0:
|
| 418 |
+
metric.speed = self._categorize_speed(metric.velocity)
|
| 419 |
+
metric.intensity = self._compute_intensity(metric.acceleration, metric.velocity)
|
| 420 |
+
return metrics
|
| 421 |
+
|
| 422 |
def _smooth_metrics(self, metrics_list: List[MovementMetrics]) -> List[MovementMetrics]:
|
| 423 |
"""Apply smoothing to reduce noise in metrics using a simple moving average."""
|
| 424 |
window_size = 3
|
demo/app.py
CHANGED
|
@@ -5,6 +5,7 @@ Author: Csaba (BladeSzaSza)
|
|
| 5 |
"""
|
| 6 |
import gradio as gr
|
| 7 |
import os
|
|
|
|
| 8 |
# from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
|
| 9 |
from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
|
| 10 |
from gradio_overlay_video import OverlayVideo
|
|
@@ -20,9 +21,11 @@ try:
|
|
| 20 |
MovementIntensity
|
| 21 |
)
|
| 22 |
agent_api = LabanAgentAPI()
|
|
|
|
| 23 |
except Exception as e:
|
| 24 |
print(f"Warning: Agent API not available: {e}")
|
| 25 |
agent_api = None
|
|
|
|
| 26 |
# Initialize components
|
| 27 |
try:
|
| 28 |
analyzer = LabanMovementAnalysis(
|
|
@@ -86,6 +89,55 @@ def process_video_standard(video : str, model : str, include_keypoints : bool) -
|
|
| 86 |
except (RuntimeError, ValueError, OSError) as e:
|
| 87 |
return {"error": str(e)}
|
| 88 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
# ββ 4. Build UI βββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 90 |
def create_demo() -> gr.Blocks:
|
| 91 |
with gr.Blocks(
|
|
@@ -184,12 +236,13 @@ def create_demo() -> gr.Blocks:
|
|
| 184 |
"## See the movement analysis in action with an interactive overlay. "
|
| 185 |
"Analyze video @ π¬ Standard Analysis tab"
|
| 186 |
)
|
| 187 |
-
with gr.Row(equal_height=True, min_height=
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
|
|
|
| 193 |
|
| 194 |
|
| 195 |
# Update overlay when JSON changes
|
|
@@ -205,6 +258,252 @@ def create_demo() -> gr.Blocks:
|
|
| 205 |
inputs=[json_out],
|
| 206 |
outputs=[overlay_video]
|
| 207 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
|
| 209 |
# Footer
|
| 210 |
with gr.Row():
|
|
|
|
| 5 |
"""
|
| 6 |
import gradio as gr
|
| 7 |
import os
|
| 8 |
+
from pathlib import Path
|
| 9 |
# from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
|
| 10 |
from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
|
| 11 |
from gradio_overlay_video import OverlayVideo
|
|
|
|
| 21 |
MovementIntensity
|
| 22 |
)
|
| 23 |
agent_api = LabanAgentAPI()
|
| 24 |
+
HAS_AGENT_API = True
|
| 25 |
except Exception as e:
|
| 26 |
print(f"Warning: Agent API not available: {e}")
|
| 27 |
agent_api = None
|
| 28 |
+
HAS_AGENT_API = False
|
| 29 |
# Initialize components
|
| 30 |
try:
|
| 31 |
analyzer = LabanMovementAnalysis(
|
|
|
|
| 89 |
except (RuntimeError, ValueError, OSError) as e:
|
| 90 |
return {"error": str(e)}
|
| 91 |
|
| 92 |
+
def process_video_for_agent(video, model, output_format="summary"):
    """Process video with agent-friendly output format.

    Args:
        video: Path or URL of the video to analyze.
        model: Pose model identifier accepted by ``PoseModel``.
        output_format: "summary", "structured", or anything else for raw JSON.

    Returns:
        A dict with the requested view of the analysis, or an
        ``{"error": ...}`` dict on any failure.
    """
    # Guard clauses: the agent API is optional and a video is mandatory.
    if not HAS_AGENT_API or agent_api is None:
        return {"error": "Agent API not available"}
    if not video:
        return {"error": "No video provided"}

    try:
        pose_model = PoseModel(model)
        result = agent_api.analyze(video, model=pose_model, generate_visualization=False)

        if output_format == "structured":
            # Flat, machine-readable snapshot of the dominant qualities.
            return {
                "success": result.success,
                "direction": result.dominant_direction.value,
                "intensity": result.dominant_intensity.value,
                "speed": result.dominant_speed,
                "fluidity": result.fluidity_score,
                "expansion": result.expansion_score,
                "segments": len(result.movement_segments)
            }
        if output_format == "summary":
            return {"summary": agent_api.get_movement_summary(result)}
        # Any other format falls through to the raw analysis JSON.
        return result.raw_data
    except Exception as e:  # boundary handler: surface any failure to the caller
        return {"error": str(e)}
|
| 120 |
+
|
| 121 |
+
# Batch processing removed due to MediaPipe compatibility issues
|
| 122 |
+
|
| 123 |
+
# process_standard_for_agent is now imported from backend
|
| 124 |
+
|
| 125 |
+
# Movement filtering removed due to MediaPipe compatibility issues
|
| 126 |
+
|
| 127 |
+
# Import agentic analysis functions from backend; fall back to stubs so the
# demo UI still loads (returning an explanatory error payload) when the
# backend module is missing from the installation.
try:
    from gradio_labanmovementanalysis.agentic_analysis import (
        generate_agentic_analysis,
        process_standard_for_agent
    )
except ImportError:
    # Fallback if backend module is not available. Signatures mirror the real
    # backend functions so UI wiring works unchanged.
    def generate_agentic_analysis(json_data, analysis_type, filter_direction="any", filter_intensity="any", filter_min_fluidity=0.0, filter_min_expansion=0.0):
        return {"error": "Agentic analysis backend not available"}

    def process_standard_for_agent(json_data, output_format="summary"):
        return {"error": "Agent conversion backend not available"}
|
| 140 |
+
|
| 141 |
# ββ 4. Build UI βββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 142 |
def create_demo() -> gr.Blocks:
|
| 143 |
with gr.Blocks(
|
|
|
|
| 236 |
"## See the movement analysis in action with an interactive overlay. "
|
| 237 |
"Analyze video @ π¬ Standard Analysis tab"
|
| 238 |
)
|
| 239 |
+
with gr.Row(equal_height=True, min_height=240):
|
| 240 |
+
with gr.Column(scale=1):
|
| 241 |
+
overlay_video = OverlayVideo(
|
| 242 |
+
value=(None, json_out),
|
| 243 |
+
autoplay=True,
|
| 244 |
+
interactive=False
|
| 245 |
+
)
|
| 246 |
|
| 247 |
|
| 248 |
# Update overlay when JSON changes
|
|
|
|
| 258 |
inputs=[json_out],
|
| 259 |
outputs=[overlay_video]
|
| 260 |
)
|
| 261 |
+
|
| 262 |
+
# Tab 3: Agentic Analysis
|
| 263 |
+
with gr.Tab("π€ Agentic Analysis"):
|
| 264 |
+
gr.Markdown("""
|
| 265 |
+
### Intelligent Movement Interpretation
|
| 266 |
+
AI-powered analysis using the processed data from the Standard Analysis tab.
|
| 267 |
+
""")
|
| 268 |
+
|
| 269 |
+
with gr.Row(equal_height=True):
|
| 270 |
+
# Left column - Video display (sourced from first tab)
|
| 271 |
+
with gr.Column(scale=1, min_width=400):
|
| 272 |
+
gr.Markdown("**Source Video** *(from Standard Analysis)*")
|
| 273 |
+
agentic_video_display = gr.Video(
|
| 274 |
+
label="Analyzed Video",
|
| 275 |
+
interactive=False,
|
| 276 |
+
height=350
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
# Model info display (sourced from first tab)
|
| 280 |
+
gr.Markdown("**Model Used** *(from Standard Analysis)*")
|
| 281 |
+
agentic_model_display = gr.Textbox(
|
| 282 |
+
label="Pose Model",
|
| 283 |
+
interactive=False,
|
| 284 |
+
value="No analysis completed yet"
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
# Right column - Analysis options and output
|
| 288 |
+
with gr.Column(scale=1, min_width=400):
|
| 289 |
+
gr.Markdown("**Analysis Type**")
|
| 290 |
+
agentic_analysis_type = gr.Radio(
|
| 291 |
+
choices=[
|
| 292 |
+
("π― SUMMARY", "summary"),
|
| 293 |
+
("π STRUCTURED", "structured"),
|
| 294 |
+
("π MOVEMENT FILTERS", "movement_filters")
|
| 295 |
+
],
|
| 296 |
+
value="summary",
|
| 297 |
+
label="Choose Analysis",
|
| 298 |
+
info="Select the type of intelligent analysis"
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
# Movement filters options (shown when movement_filters is selected)
|
| 302 |
+
with gr.Group(visible=False) as movement_filter_options:
|
| 303 |
+
gr.Markdown("**Filter Criteria**")
|
| 304 |
+
filter_direction = gr.Dropdown(
|
| 305 |
+
choices=["any", "up", "down", "left", "right", "forward", "backward", "stationary"],
|
| 306 |
+
value="any",
|
| 307 |
+
label="Dominant Direction"
|
| 308 |
+
)
|
| 309 |
+
filter_intensity = gr.Dropdown(
|
| 310 |
+
choices=["any", "low", "medium", "high"],
|
| 311 |
+
value="any",
|
| 312 |
+
label="Movement Intensity"
|
| 313 |
+
)
|
| 314 |
+
filter_min_fluidity = gr.Slider(0.0, 1.0, 0.0, label="Minimum Fluidity Score")
|
| 315 |
+
filter_min_expansion = gr.Slider(0.0, 1.0, 0.0, label="Minimum Expansion Score")
|
| 316 |
+
|
| 317 |
+
analyze_agentic_btn = gr.Button("π Generate Analysis", variant="primary", size="lg")
|
| 318 |
+
|
| 319 |
+
# Output display
|
| 320 |
+
with gr.Accordion("Analysis Results", open=True):
|
| 321 |
+
agentic_output = gr.JSON(label="Intelligent Analysis Results")
|
| 322 |
+
|
| 323 |
+
# Show/hide movement filter options based on selection
|
| 324 |
+
            def toggle_filter_options(analysis_type):
                # Reveal the filter widgets only while "movement_filters"
                # is the selected analysis type.
                return gr.Group(visible=(analysis_type == "movement_filters"))
|
| 326 |
+
|
| 327 |
+
agentic_analysis_type.change(
|
| 328 |
+
fn=toggle_filter_options,
|
| 329 |
+
inputs=[agentic_analysis_type],
|
| 330 |
+
outputs=[movement_filter_options]
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
# Update video display when standard analysis completes
|
| 334 |
+
            def update_agentic_video_display(video_input, url_input, model):
                """Update agentic tab with video and model from standard analysis."""
                # Prefer the uploaded file; fall back to the URL field.
                video_source = video_input if video_input else url_input
                return video_source, f"Model: {model}"
|
| 338 |
+
|
| 339 |
+
# Link to standard analysis inputs
|
| 340 |
+
video_in.change(
|
| 341 |
+
fn=update_agentic_video_display,
|
| 342 |
+
inputs=[video_in, url_input_enh, model_sel],
|
| 343 |
+
outputs=[agentic_video_display, agentic_model_display]
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
url_input_enh.change(
|
| 347 |
+
fn=update_agentic_video_display,
|
| 348 |
+
inputs=[video_in, url_input_enh, model_sel],
|
| 349 |
+
outputs=[agentic_video_display, agentic_model_display]
|
| 350 |
+
)
|
| 351 |
+
|
| 352 |
+
model_sel.change(
|
| 353 |
+
fn=update_agentic_video_display,
|
| 354 |
+
inputs=[video_in, url_input_enh, model_sel],
|
| 355 |
+
outputs=[agentic_video_display, agentic_model_display]
|
| 356 |
+
)
|
| 357 |
+
|
| 358 |
+
# Hook up the Generate Analysis button
|
| 359 |
+
            def process_agentic_analysis(json_data, analysis_type, filter_direction, filter_intensity, filter_min_fluidity, filter_min_expansion):
                """Process agentic analysis based on user selection."""
                # Thin passthrough to the backend entry point; kept as a local
                # function so the click event has a stable callable.
                return generate_agentic_analysis(
                    json_data,
                    analysis_type,
                    filter_direction,
                    filter_intensity,
                    filter_min_fluidity,
                    filter_min_expansion
                )
|
| 369 |
+
|
| 370 |
+
analyze_agentic_btn.click(
|
| 371 |
+
fn=process_agentic_analysis,
|
| 372 |
+
inputs=[
|
| 373 |
+
json_out, # JSON data from standard analysis
|
| 374 |
+
agentic_analysis_type,
|
| 375 |
+
filter_direction,
|
| 376 |
+
filter_intensity,
|
| 377 |
+
filter_min_fluidity,
|
| 378 |
+
filter_min_expansion
|
| 379 |
+
],
|
| 380 |
+
outputs=[agentic_output],
|
| 381 |
+
api_name="analyze_agentic"
|
| 382 |
+
)
|
| 383 |
+
|
| 384 |
+
# Auto-update agentic analysis when JSON changes and analysis type is summary
|
| 385 |
+
            def auto_update_summary(json_data, analysis_type):
                """Auto-update with summary when new analysis is available."""
                # Only refresh automatically for the lightweight "summary" view;
                # other analysis types wait for an explicit button click.
                if json_data and analysis_type == "summary":
                    return generate_agentic_analysis(json_data, "summary")
                return None
|
| 390 |
+
|
| 391 |
+
json_out.change(
|
| 392 |
+
fn=auto_update_summary,
|
| 393 |
+
inputs=[json_out, agentic_analysis_type],
|
| 394 |
+
outputs=[agentic_output]
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
# Tab 4: About
|
| 398 |
+
with gr.Tab("βΉοΈ About"):
|
| 399 |
+
gr.Markdown("""
|
| 400 |
+
# π©° Developer Journey: Laban Movement Analysis
|
| 401 |
+
|
| 402 |
+
## π― Project Vision
|
| 403 |
+
|
| 404 |
+
Created to bridge the gap between traditional **Laban Movement Analysis (LMA)** principles and modern **AI-powered pose estimation**, this platform represents a comprehensive approach to understanding human movement through technology.
|
| 405 |
+
|
| 406 |
+
## π οΈ Technical Architecture
|
| 407 |
+
|
| 408 |
+
### **Core Foundation**
|
| 409 |
+
- **15 Pose Estimation Models** from diverse sources and frameworks
|
| 410 |
+
- **Multi-format Video Processing** with URL support (YouTube, Vimeo, direct links)
|
| 411 |
+
- **Real-time Analysis Pipeline** with configurable model selection
|
| 412 |
+
- **MCP-Compatible API** for AI agent integration
|
| 413 |
+
|
| 414 |
+
### **Pose Model Ecosystem**
|
| 415 |
+
```
|
| 416 |
+
π MediaPipe Family (Google) β 3 variants (lite/full/heavy)
|
| 417 |
+
β‘ MoveNet Family (TensorFlow) β 2 variants (lightning/thunder)
|
| 418 |
+
π― YOLO v8 Family (Ultralytics) β 5 variants (n/s/m/l/x)
|
| 419 |
+
π₯ YOLO v11 Family (Ultralytics)β 5 variants (n/s/m/l/x)
|
| 420 |
+
```
|
| 421 |
+
|
| 422 |
+
## π¨ Innovation Highlights
|
| 423 |
+
|
| 424 |
+
### **1. Custom Gradio Component: `gradio_overlay_video`**
|
| 425 |
+
- **Layered Visualization**: Controlled overlay of pose data on original video
|
| 426 |
+
- **Interactive Controls**: Frame-by-frame analysis with movement metrics
|
| 427 |
+
- **Synchronized Playback**: Real-time correlation between video and data
|
| 428 |
+
|
| 429 |
+
### **2. Agentic Analysis Engine**
|
| 430 |
+
Beyond raw pose detection, we've developed intelligent interpretation layers:
|
| 431 |
+
|
| 432 |
+
- **π― SUMMARY**: Narrative movement interpretation with temporal pattern analysis
|
| 433 |
+
- **π STRUCTURED**: Comprehensive quantitative breakdowns with statistical insights
|
| 434 |
+
- **π MOVEMENT FILTERS**: Advanced pattern detection with customizable criteria
|
| 435 |
+
|
| 436 |
+
### **3. Temporal Pattern Recognition**
|
| 437 |
+
- **Movement Consistency Tracking**: Direction and intensity variation analysis
|
| 438 |
+
- **Complexity Scoring**: Multi-dimensional movement sophistication metrics
|
| 439 |
+
- **Sequence Detection**: Continuous movement pattern identification
|
| 440 |
+
- **Laban Integration**: Professional movement quality assessment using LMA principles
|
| 441 |
+
|
| 442 |
+
## π Processing Pipeline
|
| 443 |
+
|
| 444 |
+
```mermaid
|
| 445 |
+
Video Input β Pose Detection β LMA Analysis β JSON Output
|
| 446 |
+
β β β β
|
| 447 |
+
URL/Upload β 15 Models β Temporal β Visualization
|
| 448 |
+
β β Patterns β
|
| 449 |
+
Preprocessing β Keypoints β Metrics β Agentic Analysis
|
| 450 |
+
```
|
| 451 |
+
|
| 452 |
+
## π Laban Movement Analysis Integration
|
| 453 |
+
|
| 454 |
+
Our implementation translates raw pose coordinates into meaningful movement qualities:
|
| 455 |
+
|
| 456 |
+
- **Effort Qualities**: Intensity, speed, and flow characteristics
|
| 457 |
+
- **Space Usage**: Expansion patterns and directional preferences
|
| 458 |
+
- **Temporal Dynamics**: Rhythm, acceleration, and movement consistency
|
| 459 |
+
- **Quality Assessment**: Fluidity scores and movement sophistication
|
| 460 |
+
|
| 461 |
+
## π¬ Technical Achievements
|
| 462 |
+
|
| 463 |
+
### **Multi-Source Model Integration**
|
| 464 |
+
Successfully unified models from different frameworks:
|
| 465 |
+
- Google's MediaPipe (BlazePose architecture)
|
| 466 |
+
- TensorFlow's MoveNet (lightweight and accurate variants)
|
| 467 |
+
- Ultralytics' YOLO ecosystem (object detection adapted for pose)
|
| 468 |
+
|
| 469 |
+
### **Real-Time Processing Capabilities**
|
| 470 |
+
- **Streaming Support**: Frame-by-frame processing with temporal continuity
|
| 471 |
+
- **Memory Optimization**: Efficient handling of large video files
|
| 472 |
+
- **Error Recovery**: Graceful handling of pose detection failures
|
| 473 |
+
|
| 474 |
+
### **Agent-Ready Architecture**
|
| 475 |
+
- **MCP Server Integration**: Compatible with AI agent workflows
|
| 476 |
+
- **Structured API**: RESTful endpoints for programmatic access
|
| 477 |
+
- **Flexible Output Formats**: JSON, visualization videos, and metadata
|
| 478 |
+
|
| 479 |
+
## π Future Roadmap
|
| 480 |
+
|
| 481 |
+
- **3D Pose Integration**: Depth-aware movement analysis
|
| 482 |
+
- **Multi-Person Tracking**: Ensemble and group movement dynamics
|
| 483 |
+
- **Real-Time Streaming**: Live movement analysis capabilities
|
| 484 |
+
- **Machine Learning Enhancement**: Custom models trained on movement data
|
| 485 |
+
|
| 486 |
+
## π§ Built With
|
| 487 |
+
|
| 488 |
+
- **Frontend**: Gradio 5.33+ with custom Svelte components
|
| 489 |
+
- **Backend**: Python with FastAPI and async processing
|
| 490 |
+
- **Computer Vision**: MediaPipe, TensorFlow, PyTorch, Ultralytics
|
| 491 |
+
- **Analysis**: NumPy, OpenCV, custom Laban algorithms
|
| 492 |
+
- **Deployment**: Hugging Face Spaces with Docker support
|
| 493 |
+
|
| 494 |
+
---
|
| 495 |
+
|
| 496 |
+
### π¨βπ» Created by **Csaba BolyΓ³s**
|
| 497 |
+
|
| 498 |
+
*Combining classical movement analysis with cutting-edge AI to unlock new possibilities in human movement understanding.*
|
| 499 |
+
|
| 500 |
+
**Connect:**
|
| 501 |
+
[GitHub](https://github.com/bladeszasza) β’ [Hugging Face](https://huggingface.co/BladeSzaSza) β’ [LinkedIn](https://www.linkedin.com/in/csaba-bolyΓ³s-00a11767/)
|
| 502 |
+
|
| 503 |
+
---
|
| 504 |
+
|
| 505 |
+
> *"Movement is a language. Technology helps us understand what the body is saying."*
|
| 506 |
+
""")
|
| 507 |
|
| 508 |
# Footer
|
| 509 |
with gr.Row():
|
pyproject.toml
CHANGED
|
@@ -8,7 +8,7 @@ build-backend = "hatchling.build"
|
|
| 8 |
|
| 9 |
[project]
|
| 10 |
name = "gradio_labanmovementanalysis"
|
| 11 |
-
version = "0.0.
|
| 12 |
description = "A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents"
|
| 13 |
readme = "README.md"
|
| 14 |
license = "apache-2.0"
|
|
|
|
| 8 |
|
| 9 |
[project]
|
| 10 |
name = "gradio_labanmovementanalysis"
|
| 11 |
+
version = "0.0.7"
|
| 12 |
description = "A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents"
|
| 13 |
readme = "README.md"
|
| 14 |
license = "apache-2.0"
|