Csaba Bolyos committed on
Commit
7df5245
·
1 Parent(s): 42b2e74

linked the backend back

Browse files
Files changed (5) hide show
  1. README.md +160 -51
  2. demo/app.py +135 -30
  3. demo/space.py +154 -44
  4. pyproject.toml +1 -1
  5. version.py +0 -2
README.md CHANGED
@@ -44,74 +44,178 @@ Author: Csaba (BladeSzaSza)
44
  """
45
 
46
  import gradio as gr
47
-
48
-
49
- # ── 3. Dummy backend for local dev (replace with real fn) ───────
50
- def process_video_standard(video, model, viz, kp):
51
- """Return empty JSON + passthrough video placeholder."""
52
- return {}, video
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
  # ── 4. Build UI ─────────────────────────────────────────────────
55
  def create_demo() -> gr.Blocks:
56
  with gr.Blocks(
57
- title="Laban Movement Analysis – Complete Suite",
58
  theme='gstaff/sketch',
59
  fill_width=True,
60
  ) as demo:
61
 
62
  # ── Hero banner ──
63
- gr.HTML(
64
  """
65
- <div class="main-header">
66
- <h1>🎭 Laban Movement Analysis – Complete Suite</h1>
66
- <p>Pose estimation • AI action recognition • Real-time agents</p>
67
- <p style="font-size:.85rem;opacity:.85">v0.01-beta • 20+ pose models • MCP</p>
69
- </div>
70
  """
71
  )
72
-
73
- # ── Workspace ──
74
- with gr.Row(equal_height=True):
75
- # Input column
76
- with gr.Column(scale=1, min_width=260):
77
- video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
78
- model_sel = gr.Dropdown(
79
- ["mediapipe", "movenet", "yolo"], value="mediapipe", label="Pose Model"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  )
81
- with gr.Accordion("Options", open=False):
82
- enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
83
- include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
84
- analyze_btn = gr.Button("Analyze Movement", variant="primary")
85
-
86
- # Output column
87
- with gr.Column(scale=2, min_width=320):
88
- viz_out = gr.Video(label="Annotated Video")
89
- with gr.Accordion("Raw JSON", open=False):
90
- json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
91
-
92
- # Wiring
93
- analyze_btn.click(
94
- fn=process_video_standard,
95
- inputs=[video_in, model_sel, enable_viz, include_kp],
96
- outputs=[json_out, viz_out],
97
- )
98
 
99
  # Footer
100
- gr.HTML(
101
- """
102
- <div class="author-info">
103
- Built by Csaba Bolyós •
104
- <a href="https://github.com/bladeszasza" target="_blank">GitHub</a> β€’
105
- <a href="https://huggingface.co/BladeSzaSza" target="_blank">HF</a>
106
- </div>
107
- """
108
- )
109
  return demo
110
-
111
  if __name__ == "__main__":
112
- print("🚀 Starting Laban Movement Analysis...")
113
- demo = create_demo()
114
-
 
115
 
116
  ```
117
 
@@ -170,6 +274,11 @@ bool
170
 
171
 
172
 
 
 
 
 
 
173
 
174
  <tr>
175
  <td align="left"><code>label</code></td>
 
44
  """
45
 
46
  import gradio as gr
47
+ import os
48
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
49
+
50
+ # Import agent API if available
51
+ # Initialize agent API if available
52
+ agent_api = None
53
+ try:
54
+ from gradio_labanmovementanalysis.agent_api import (
55
+ LabanAgentAPI,
56
+ PoseModel,
57
+ MovementDirection,
58
+ MovementIntensity
59
+ )
60
+ HAS_AGENT_API = True
61
+
62
+ try:
63
+ agent_api = LabanAgentAPI()
64
+ except Exception as e:
65
+ print(f"Warning: Agent API not available: {e}")
66
+ agent_api = None
67
+ except ImportError:
68
+ HAS_AGENT_API = False
69
+ # Initialize components
70
+ try:
71
+ analyzer = LabanMovementAnalysis(
72
+ enable_visualization=True
73
+ )
74
+ print("✅ Core features initialized successfully")
75
+ except Exception as e:
76
+ print(f"Warning: Some features may not be available: {e}")
77
+ analyzer = LabanMovementAnalysis()
78
+
79
+
80
+ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
81
+ """Enhanced video processing with all new features."""
82
+ if not video_input:
83
+ return {"error": "No video provided"}, None
84
+
85
+ try:
86
+ # Handle both file upload and URL input
87
+ video_path = video_input.name if hasattr(video_input, 'name') else video_input
88
+
89
+ json_result, viz_result = analyzer.process_video(
90
+ video_path,
91
+ model=model,
92
+ enable_visualization=enable_viz,
93
+ include_keypoints=include_keypoints
94
+ )
95
+ return json_result, viz_result
96
+ except Exception as e:
97
+ error_result = {"error": str(e)}
98
+ return error_result, None
99
+
100
+ def process_video_standard(video, model, enable_viz, include_keypoints):
101
+ """Standard video processing function."""
102
+ if video is None:
103
+ return None, None
104
+
105
+ try:
106
+ json_output, video_output = analyzer.process_video(
107
+ video,
108
+ model=model,
109
+ enable_visualization=enable_viz,
110
+ include_keypoints=include_keypoints
111
+ )
112
+ return json_output, video_output
113
+ except Exception as e:
114
+ return {"error": str(e)}, None
115
 
116
  # ── 4. Build UI ─────────────────────────────────────────────────
117
  def create_demo() -> gr.Blocks:
118
  with gr.Blocks(
119
+ title="Laban Movement Analysis",
120
  theme='gstaff/sketch',
121
  fill_width=True,
122
  ) as demo:
123
 
124
  # ── Hero banner ──
125
+ gr.Markdown(
126
  """
127
+ # 🎭 Laban Movement Analysis – Complete Suite
128
+
129
+ Pose estimation • AI action recognition
 
 
130
  """
131
  )
132
+ with gr.Tabs():
133
+ # Tab 1: Standard Analysis
134
+ with gr.Tab("🎬 Standard Analysis"):
135
+ gr.Markdown("""
136
+ ### Classic Laban Movement Analysis
137
+ Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
138
+ """)
139
+ # ── Workspace ──
140
+ with gr.Row(equal_height=True):
141
+ # Input column
142
+ with gr.Column(scale=1, min_width=260):
143
+
144
+ video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
145
+ # URL input option
146
+ url_input_enh = gr.Textbox(
147
+ label="Or Enter Video URL",
148
+ placeholder="YouTube URL, Vimeo URL, or direct video URL",
149
+ info="Leave file upload empty to use URL"
150
+ )
151
+ gr.Examples(
152
+ examples=[
153
+ ["examples/balette.mp4"],
154
+ ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
155
+ ["https://vimeo.com/815392738"]
156
+ ],
157
+ inputs=url_input_enh,
158
+ label="Examples"
159
+ )
160
+ gr.Markdown("**Model Selection**")
161
+
162
+ model_sel = gr.Dropdown(
163
+ choices=[
164
+ # MediaPipe variants
165
+ "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
166
+ # MoveNet variants
167
+ "movenet-lightning", "movenet-thunder",
168
+ # YOLO v8 variants
169
+ "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
170
+ # YOLO v11 variants
171
+ "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
172
+ ],
173
+ value="mediapipe-full",
174
+ label="Advanced Pose Models",
175
+ info="15 model variants available"
176
+ )
177
+
178
+ gr.Markdown("**Analysis Options**")
179
+
180
+ with gr.Accordion("Options", open=False):
181
+ enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
182
+ include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
183
+ analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
184
+
185
+ # Output column
186
+ with gr.Column(scale=2, min_width=320):
187
+ viz_out = gr.Video(label="Annotated Video")
188
+ with gr.Accordion("Raw JSON", open=False):
189
+ json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
190
+
191
+ # Wiring
192
+ def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
193
+ """Process either file upload or URL input."""
194
+ video_source = file_input if file_input else url_input
195
+ return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
196
+
197
+ analyze_btn_enh.click(
198
+ fn=process_enhanced_input,
199
+ inputs=[video_in, url_input_enh, model_sel, enable_viz, include_kp],
200
+ outputs=[json_out, viz_out],
201
+ api_name="analyze_enhanced"
202
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
203
 
204
  # Footer
205
+ with gr.Row():
206
+ gr.Markdown(
207
+ """
208
+ **Built by Csaba Bolyós**
209
+ [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza)
210
+ """
211
+ )
 
 
212
  return demo
213
+
214
  if __name__ == "__main__":
215
+ demo = create_demo()
216
+ demo.launch(server_name="0.0.0.0",
217
+ server_port=int(os.getenv("PORT", 7860)),
218
+ mcp_server=True)
219
 
220
  ```
221
 
 
274
 
275
 
276
 
277
+ ```python
278
+ bool
279
+ ```
280
+
281
+
282
 
283
  <tr>
284
  <td align="left"><code>label</code></td>
demo/app.py CHANGED
@@ -6,17 +6,78 @@ Author: Csaba (BladeSzaSza)
6
 
7
  import gradio as gr
8
  import os
 
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- # ── 3. Dummy backend for local dev (replace with real fn) ───────
12
- def process_video_standard(video, model, viz, kp):
13
- """Return empty JSON + passthrough video placeholder."""
14
- return {}, video
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  # ── 4. Build UI ─────────────────────────────────────────────────
17
  def create_demo() -> gr.Blocks:
18
  with gr.Blocks(
19
- title="Laban Movement Analysis – Complete Suite",
20
  theme='gstaff/sketch',
21
  fill_width=True,
22
  ) as demo:
@@ -26,36 +87,80 @@ def create_demo() -> gr.Blocks:
26
  """
27
  # 🎭 Laban Movement Analysis – Complete Suite
28
 
29
- Pose estimation • AI action recognition • Real-time agents
30
- **v0.01-beta • 20+ pose models • MCP**
31
  """
32
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
- # ── Workspace ──
35
- with gr.Row(equal_height=True):
36
- # Input column
37
- with gr.Column(scale=1, min_width=260):
38
- video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
39
- model_sel = gr.Dropdown(
40
- ["mediapipe", "movenet", "yolo"], value="mediapipe", label="Pose Model"
41
- )
42
- with gr.Accordion("Options", open=False):
43
- enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
44
- include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
45
- analyze_btn = gr.Button("Analyze Movement", variant="primary")
46
 
47
- # Output column
48
- with gr.Column(scale=2, min_width=320):
49
- viz_out = gr.Video(label="Annotated Video")
50
- with gr.Accordion("Raw JSON", open=False):
51
- json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
52
 
53
- # Wiring
54
- analyze_btn.click(
55
- fn=process_video_standard,
56
- inputs=[video_in, model_sel, enable_viz, include_kp],
57
- outputs=[json_out, viz_out],
58
- )
 
 
 
 
 
 
59
 
60
  # Footer
61
  with gr.Row():
 
6
 
7
  import gradio as gr
8
  import os
9
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
10
 
11
+ # Import agent API if available
12
+ # Initialize agent API if available
13
+ agent_api = None
14
+ try:
15
+ from gradio_labanmovementanalysis.agent_api import (
16
+ LabanAgentAPI,
17
+ PoseModel,
18
+ MovementDirection,
19
+ MovementIntensity
20
+ )
21
+ HAS_AGENT_API = True
22
+
23
+ try:
24
+ agent_api = LabanAgentAPI()
25
+ except Exception as e:
26
+ print(f"Warning: Agent API not available: {e}")
27
+ agent_api = None
28
+ except ImportError:
29
+ HAS_AGENT_API = False
30
+ # Initialize components
31
+ try:
32
+ analyzer = LabanMovementAnalysis(
33
+ enable_visualization=True
34
+ )
35
+ print("✅ Core features initialized successfully")
36
+ except Exception as e:
37
+ print(f"Warning: Some features may not be available: {e}")
38
+ analyzer = LabanMovementAnalysis()
39
 
40
+
41
+ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
42
+ """Enhanced video processing with all new features."""
43
+ if not video_input:
44
+ return {"error": "No video provided"}, None
45
+
46
+ try:
47
+ # Handle both file upload and URL input
48
+ video_path = video_input.name if hasattr(video_input, 'name') else video_input
49
+
50
+ json_result, viz_result = analyzer.process_video(
51
+ video_path,
52
+ model=model,
53
+ enable_visualization=enable_viz,
54
+ include_keypoints=include_keypoints
55
+ )
56
+ return json_result, viz_result
57
+ except Exception as e:
58
+ error_result = {"error": str(e)}
59
+ return error_result, None
60
+
61
+ def process_video_standard(video, model, enable_viz, include_keypoints):
62
+ """Standard video processing function."""
63
+ if video is None:
64
+ return None, None
65
+
66
+ try:
67
+ json_output, video_output = analyzer.process_video(
68
+ video,
69
+ model=model,
70
+ enable_visualization=enable_viz,
71
+ include_keypoints=include_keypoints
72
+ )
73
+ return json_output, video_output
74
+ except Exception as e:
75
+ return {"error": str(e)}, None
76
 
77
  # ── 4. Build UI ─────────────────────────────────────────────────
78
  def create_demo() -> gr.Blocks:
79
  with gr.Blocks(
80
+ title="Laban Movement Analysis",
81
  theme='gstaff/sketch',
82
  fill_width=True,
83
  ) as demo:
 
87
  """
88
  # 🎭 Laban Movement Analysis – Complete Suite
89
 
90
+ Pose estimation • AI action recognition
 
91
  """
92
  )
93
+ with gr.Tabs():
94
+ # Tab 1: Standard Analysis
95
+ with gr.Tab("🎬 Standard Analysis"):
96
+ gr.Markdown("""
97
+ ### Classic Laban Movement Analysis
98
+ Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
99
+ """)
100
+ # ── Workspace ──
101
+ with gr.Row(equal_height=True):
102
+ # Input column
103
+ with gr.Column(scale=1, min_width=260):
104
+
105
+ video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
106
+ # URL input option
107
+ url_input_enh = gr.Textbox(
108
+ label="Or Enter Video URL",
109
+ placeholder="YouTube URL, Vimeo URL, or direct video URL",
110
+ info="Leave file upload empty to use URL"
111
+ )
112
+ gr.Examples(
113
+ examples=[
114
+ ["examples/balette.mp4"],
115
+ ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
116
+ ["https://vimeo.com/815392738"]
117
+ ],
118
+ inputs=url_input_enh,
119
+ label="Examples"
120
+ )
121
+ gr.Markdown("**Model Selection**")
122
+
123
+ model_sel = gr.Dropdown(
124
+ choices=[
125
+ # MediaPipe variants
126
+ "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
127
+ # MoveNet variants
128
+ "movenet-lightning", "movenet-thunder",
129
+ # YOLO v8 variants
130
+ "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
131
+ # YOLO v11 variants
132
+ "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
133
+ ],
134
+ value="mediapipe-full",
135
+ label="Advanced Pose Models",
136
+ info="15 model variants available"
137
+ )
138
 
139
+ gr.Markdown("**Analysis Options**")
140
+
141
+ with gr.Accordion("Options", open=False):
142
+ enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
143
+ include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
144
+ analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
 
 
 
 
 
 
145
 
146
+ # Output column
147
+ with gr.Column(scale=2, min_width=320):
148
+ viz_out = gr.Video(label="Annotated Video")
149
+ with gr.Accordion("Raw JSON", open=False):
150
+ json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
151
 
152
+ # Wiring
153
+ def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
154
+ """Process either file upload or URL input."""
155
+ video_source = file_input if file_input else url_input
156
+ return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
157
+
158
+ analyze_btn_enh.click(
159
+ fn=process_enhanced_input,
160
+ inputs=[video_in, url_input_enh, model_sel, enable_viz, include_kp],
161
+ outputs=[json_out, viz_out],
162
+ api_name="analyze_enhanced"
163
+ )
164
 
165
  # Footer
166
  with gr.Row():
demo/space.py CHANGED
@@ -1,4 +1,6 @@
 
1
  import gradio as gr
 
2
  import os
3
 
4
  _docs = {'LabanMovementAnalysis': {'description': 'Gradio component for video-based pose analysis with Laban Movement Analysis metrics.', 'members': {'__init__': {'default_model': {'type': 'str', 'default': '"mediapipe"', 'description': 'Default pose estimation model ("mediapipe", "movenet", "yolo")'}, 'enable_visualization': {'type': 'bool', 'default': 'True', 'description': 'Whether to generate visualization video by default'}, 'include_keypoints': {'type': 'bool', 'default': 'False', 'description': 'Whether to include raw keypoints in JSON output'}, 'enable_webrtc': {'type': 'bool', 'default': 'False', 'description': 'Whether to enable WebRTC real-time analysis'}, 'label': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': 'Component label'}, 'every': {'type': 'typing.Optional[float][float, None]', 'default': 'None', 'description': None}, 'show_label': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'container': {'type': 'bool', 'default': 'True', 'description': None}, 'scale': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'min_width': {'type': 'int', 'default': '160', 'description': None}, 'interactive': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'visible': {'type': 'bool', 'default': 'True', 'description': None}, 'elem_id': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': None}, 'elem_classes': {'type': 'typing.Optional[typing.List[str]][\n typing.List[str][str], None\n]', 'default': 'None', 'description': None}, 'render': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {'value': {'type': 'typing.Any', 'description': 'Analysis results'}}, 'preprocess': {'return': {'type': 'typing.Dict[str, typing.Any][str, typing.Any]', 'description': 'Processed data for analysis'}, 'value': None}}, 'events': {}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': 
{'LabanMovementAnalysis': []}}}
@@ -18,8 +20,9 @@ with gr.Blocks(
18
 
19
  A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents
20
  """, elem_classes=["md-custom"], header_links=True)
 
21
  gr.Markdown(
22
- '''
23
  ## Installation
24
 
25
  ```bash
@@ -36,74 +39,181 @@ Author: Csaba (BladeSzaSza)
36
  \"\"\"
37
 
38
  import gradio as gr
39
-
40
-
41
- # ── 3. Dummy backend for local dev (replace with real fn) ───────
42
- def process_video_standard(video, model, viz, kp):
43
- \"\"\"Return empty JSON + passthrough video placeholder.\"\"\"
44
- return {}, video
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
  # ── 4. Build UI ─────────────────────────────────────────────────
47
  def create_demo() -> gr.Blocks:
48
  with gr.Blocks(
49
- title="Laban Movement Analysis – Complete Suite",
50
  theme='gstaff/sketch',
51
  fill_width=True,
52
  ) as demo:
53
 
54
  # ── Hero banner ──
55
  gr.Markdown(
56
- """
57
  # 🎭 Laban Movement Analysis – Complete Suite
58
 
59
- Pose estimation • AI action recognition • Real-time agents
60
- **v0.01-beta • 20+ pose models • MCP**
61
- """
62
  )
63
-
64
- # ── Workspace ──
65
- with gr.Row(equal_height=True):
66
- # Input column
67
- with gr.Column(scale=1, min_width=260):
68
- video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
69
- model_sel = gr.Dropdown(
70
- ["mediapipe", "movenet", "yolo"], value="mediapipe", label="Pose Model"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  )
72
- with gr.Accordion("Options", open=False):
73
- enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
74
- include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
75
- analyze_btn = gr.Button("Analyze Movement", variant="primary")
76
-
77
- # Output column
78
- with gr.Column(scale=2, min_width=320):
79
- viz_out = gr.Video(label="Annotated Video")
80
- with gr.Accordion("Raw JSON", open=False):
81
- json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
82
-
83
- # Wiring
84
- analyze_btn.click(
85
- fn=process_video_standard,
86
- inputs=[video_in, model_sel, enable_viz, include_kp],
87
- outputs=[json_out, viz_out],
88
- )
89
 
90
  # Footer
91
  with gr.Row():
92
  gr.Markdown(
93
- """
94
  **Built by Csaba Bolyós**
95
  [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza)
96
- """
97
  )
98
  return demo
99
-
100
  if __name__ == "__main__":
101
- print("🚀 Starting Laban Movement Analysis...")
102
- demo = create_demo()
103
-
 
104
 
105
  ```
106
- ''', elem_classes=["md-custom"], header_links=True)
107
 
108
 
109
  gr.Markdown("""
 
1
+
2
  import gradio as gr
3
+ from app import demo as app
4
  import os
5
 
6
  _docs = {'LabanMovementAnalysis': {'description': 'Gradio component for video-based pose analysis with Laban Movement Analysis metrics.', 'members': {'__init__': {'default_model': {'type': 'str', 'default': '"mediapipe"', 'description': 'Default pose estimation model ("mediapipe", "movenet", "yolo")'}, 'enable_visualization': {'type': 'bool', 'default': 'True', 'description': 'Whether to generate visualization video by default'}, 'include_keypoints': {'type': 'bool', 'default': 'False', 'description': 'Whether to include raw keypoints in JSON output'}, 'enable_webrtc': {'type': 'bool', 'default': 'False', 'description': 'Whether to enable WebRTC real-time analysis'}, 'label': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': 'Component label'}, 'every': {'type': 'typing.Optional[float][float, None]', 'default': 'None', 'description': None}, 'show_label': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'container': {'type': 'bool', 'default': 'True', 'description': None}, 'scale': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'min_width': {'type': 'int', 'default': '160', 'description': None}, 'interactive': {'type': 'typing.Optional[bool][bool, None]', 'default': 'None', 'description': None}, 'visible': {'type': 'bool', 'default': 'True', 'description': None}, 'elem_id': {'type': 'typing.Optional[str][str, None]', 'default': 'None', 'description': None}, 'elem_classes': {'type': 'typing.Optional[typing.List[str]][\n typing.List[str][str], None\n]', 'default': 'None', 'description': None}, 'render': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {'value': {'type': 'typing.Any', 'description': 'Analysis results'}}, 'preprocess': {'return': {'type': 'typing.Dict[str, typing.Any][str, typing.Any]', 'description': 'Processed data for analysis'}, 'value': None}}, 'events': {}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': 
{'LabanMovementAnalysis': []}}}
 
20
 
21
  A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents
22
  """, elem_classes=["md-custom"], header_links=True)
23
+ app.render()
24
  gr.Markdown(
25
+ """
26
  ## Installation
27
 
28
  ```bash
 
39
  \"\"\"
40
 
41
  import gradio as gr
42
+ import os
43
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
44
+
45
+ # Import agent API if available
46
+ # Initialize agent API if available
47
+ agent_api = None
48
+ try:
49
+ from gradio_labanmovementanalysis.agent_api import (
50
+ LabanAgentAPI,
51
+ PoseModel,
52
+ MovementDirection,
53
+ MovementIntensity
54
+ )
55
+ HAS_AGENT_API = True
56
+
57
+ try:
58
+ agent_api = LabanAgentAPI()
59
+ except Exception as e:
60
+ print(f"Warning: Agent API not available: {e}")
61
+ agent_api = None
62
+ except ImportError:
63
+ HAS_AGENT_API = False
64
+ # Initialize components
65
+ try:
66
+ analyzer = LabanMovementAnalysis(
67
+ enable_visualization=True
68
+ )
69
+ print("✅ Core features initialized successfully")
70
+ except Exception as e:
71
+ print(f"Warning: Some features may not be available: {e}")
72
+ analyzer = LabanMovementAnalysis()
73
+
74
+
75
+ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
76
+ \"\"\"Enhanced video processing with all new features.\"\"\"
77
+ if not video_input:
78
+ return {"error": "No video provided"}, None
79
+
80
+ try:
81
+ # Handle both file upload and URL input
82
+ video_path = video_input.name if hasattr(video_input, 'name') else video_input
83
+
84
+ json_result, viz_result = analyzer.process_video(
85
+ video_path,
86
+ model=model,
87
+ enable_visualization=enable_viz,
88
+ include_keypoints=include_keypoints
89
+ )
90
+ return json_result, viz_result
91
+ except Exception as e:
92
+ error_result = {"error": str(e)}
93
+ return error_result, None
94
+
95
+ def process_video_standard(video, model, enable_viz, include_keypoints):
96
+ \"\"\"Standard video processing function.\"\"\"
97
+ if video is None:
98
+ return None, None
99
+
100
+ try:
101
+ json_output, video_output = analyzer.process_video(
102
+ video,
103
+ model=model,
104
+ enable_visualization=enable_viz,
105
+ include_keypoints=include_keypoints
106
+ )
107
+ return json_output, video_output
108
+ except Exception as e:
109
+ return {"error": str(e)}, None
110
 
111
  # ── 4. Build UI ─────────────────────────────────────────────────
112
  def create_demo() -> gr.Blocks:
113
  with gr.Blocks(
114
+ title="Laban Movement Analysis",
115
  theme='gstaff/sketch',
116
  fill_width=True,
117
  ) as demo:
118
 
119
  # ── Hero banner ──
120
  gr.Markdown(
121
+ \"\"\"
122
  # 🎭 Laban Movement Analysis – Complete Suite
123
 
124
+ Pose estimation • AI action recognition
125
+ \"\"\"
 
126
  )
127
+ with gr.Tabs():
128
+ # Tab 1: Standard Analysis
129
+ with gr.Tab("🎬 Standard Analysis"):
130
+ gr.Markdown(\"\"\"
131
+ ### Classic Laban Movement Analysis
132
+ Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
133
+ \"\"\")
134
+ # ── Workspace ──
135
+ with gr.Row(equal_height=True):
136
+ # Input column
137
+ with gr.Column(scale=1, min_width=260):
138
+
139
+ video_in = gr.Video(label="Upload Video", sources=["upload"], format="mp4")
140
+ # URL input option
141
+ url_input_enh = gr.Textbox(
142
+ label="Or Enter Video URL",
143
+ placeholder="YouTube URL, Vimeo URL, or direct video URL",
144
+ info="Leave file upload empty to use URL"
145
+ )
146
+ gr.Examples(
147
+ examples=[
148
+ ["examples/balette.mp4"],
149
+ ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
150
+ ["https://vimeo.com/815392738"]
151
+ ],
152
+ inputs=url_input_enh,
153
+ label="Examples"
154
+ )
155
+ gr.Markdown("**Model Selection**")
156
+
157
+ model_sel = gr.Dropdown(
158
+ choices=[
159
+ # MediaPipe variants
160
+ "mediapipe-lite", "mediapipe-full", "mediapipe-heavy",
161
+ # MoveNet variants
162
+ "movenet-lightning", "movenet-thunder",
163
+ # YOLO v8 variants
164
+ "yolo-v8-n", "yolo-v8-s", "yolo-v8-m", "yolo-v8-l", "yolo-v8-x",
165
+ # YOLO v11 variants
166
+ "yolo-v11-n", "yolo-v11-s", "yolo-v11-m", "yolo-v11-l", "yolo-v11-x"
167
+ ],
168
+ value="mediapipe-full",
169
+ label="Advanced Pose Models",
170
+ info="15 model variants available"
171
+ )
172
+
173
+ gr.Markdown("**Analysis Options**")
174
+
175
+ with gr.Accordion("Options", open=False):
176
+ enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
177
+ include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
178
+ analyze_btn_enh = gr.Button("🚀 Enhanced Analysis", variant="primary", size="lg")
179
+
180
+ # Output column
181
+ with gr.Column(scale=2, min_width=320):
182
+ viz_out = gr.Video(label="Annotated Video")
183
+ with gr.Accordion("Raw JSON", open=False):
184
+ json_out = gr.JSON(label="Movement Analysis", elem_classes=["json-output"])
185
+
186
+ # Wiring
187
+ def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
188
+ \"\"\"Process either file upload or URL input.\"\"\"
189
+ video_source = file_input if file_input else url_input
190
+ return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
191
+
192
+ analyze_btn_enh.click(
193
+ fn=process_enhanced_input,
194
+ inputs=[video_in, url_input_enh, model_sel, enable_viz, include_kp],
195
+ outputs=[json_out, viz_out],
196
+ api_name="analyze_enhanced"
197
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
  # Footer
200
  with gr.Row():
201
  gr.Markdown(
202
+ \"\"\"
203
  **Built by Csaba Bolyós**
204
  [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza)
205
+ \"\"\"
206
  )
207
  return demo
208
+
209
  if __name__ == "__main__":
210
+ demo = create_demo()
211
+ demo.launch(server_name="0.0.0.0",
212
+ server_port=int(os.getenv("PORT", 7860)),
213
+ mcp_server=True)
214
 
215
  ```
216
+ """, elem_classes=["md-custom"], header_links=True)
217
 
218
 
219
  gr.Markdown("""
pyproject.toml CHANGED
@@ -5,7 +5,7 @@ description = "A Gradio 5 component for video movement analysis using Laban Move
5
  readme = "README.md"
6
  license = "apache-2.0"
7
  authors = [{ name = "Csaba Bolyós", email = "bladeszasza@gmail.com" }]
8
- keywords = ["gradio-custom-component", "gradio-5", "laban-movement-analysis", "LMA", "pose-estimation", "movement-analysis", "mcp", "ai-agents", "webrtc"]
9
  # Core dependencies
10
  requires-python = ">=3.10"
11
  dependencies = [
 
5
  readme = "README.md"
6
  license = "apache-2.0"
7
  authors = [{ name = "Csaba Bolyós", email = "bladeszasza@gmail.com" }]
8
+ keywords = ["gradio-custom-component", "gradio-5", "laban-movement-analysis", "LMA", "pose-estimation", "movement-analysis", "mcp", "ai-agents"]
9
  # Core dependencies
10
  requires-python = ">=3.10"
11
  dependencies = [
version.py CHANGED
@@ -24,7 +24,6 @@ RELEASE_NOTES = """
24
  🌟 Core Features:
25
  - 17+ Pose Estimation Models (MediaPipe, MoveNet, YOLO v8/v11 with x variants)
26
  - YouTube & Vimeo URL Support
27
- - Real-time WebRTC Camera Analysis
28
  - Agent API with MCP Integration
29
  - Batch Processing & Movement Filtering
30
  - Professional VIRIDIAN UI Theme
@@ -32,7 +31,6 @@ RELEASE_NOTES = """
32
  🚀 Technical Stack:
33
  - Gradio 5.0+ Frontend
34
  - OpenCV + MediaPipe + Ultralytics YOLO
35
- - WebRTC Streaming Technology
36
  - FastAPI Backend Integration
37
 
38
  ⚠️ Beta Status:
 
24
  🌟 Core Features:
25
  - 17+ Pose Estimation Models (MediaPipe, MoveNet, YOLO v8/v11 with x variants)
26
  - YouTube & Vimeo URL Support
 
27
  - Agent API with MCP Integration
28
  - Batch Processing & Movement Filtering
29
  - Professional VIRIDIAN UI Theme
 
31
  🚀 Technical Stack:
32
  - Gradio 5.0+ Frontend
33
  - OpenCV + MediaPipe + Ultralytics YOLO
 
34
  - FastAPI Backend Integration
35
 
36
  ⚠️ Beta Status: