# NeuroMusicLab / app.py
# sofieff's picture
# layering works
# c2929af
# raw
# history blame
# 53 kB
"""
EEG Motor Imagery Music Composer - Redesigned Interface
=======================================================
Brain-Computer Interface that creates music compositions based on imagined movements.
"""
import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
import time
import threading
import os
from typing import Dict, Tuple, Any, List
# Import our custom modules
from sound_library import SoundManager
from data_processor import EEGDataProcessor
from classifier import MotorImageryClassifier
from config import DEMO_DATA_PATHS, CLASS_NAMES, CONFIDENCE_THRESHOLD
def validate_data_setup() -> str:
    """Check that every demo EEG data file referenced in config exists.

    Iterates the subject_id -> path mapping in ``DEMO_DATA_PATHS`` and
    tests each path on disk.

    Returns:
        A human-readable status string: either a failure message listing
        every missing file (one per line), or a success message when all
        files are present.
    """
    # `os` is already imported at module level; no need for a local import,
    # and os.path.exists() returns False (rather than raising) for missing
    # paths, so no try/except is needed here.
    missing_files = []
    for subject_id, path in DEMO_DATA_PATHS.items():
        if not os.path.exists(path):
            missing_files.append(f"Subject {subject_id}: {path}")
    if missing_files:
        return "❌ Missing data files:\n" + "\n".join(missing_files)
    return "βœ… All data files found"
# Global app state shared by all Gradio callbacks below.
app_state = {
    'is_running': False,            # not referenced by the visible callbacks — presumably legacy; confirm before removing
    'demo_data': None,              # EEG epochs array, shape (n_epochs, n_chans, n_times) per classifier.load_model usage
    'demo_labels': None,            # class labels aligned with demo_data
    'classification_history': [],   # not updated in the visible callbacks — TODO confirm it is still used
    'composition_active': False,    # True while a composition cycle is in progress
    'auto_mode': False              # True while timer-driven automatic composition is running
}
# Initialize components (runs once at import time; failures abort startup).
print("🧠 EEG Motor Imagery Music Composer")
print("=" * 50)
print("Starting Gradio application...")
try:
    sound_manager = SoundManager()
    data_processor = EEGDataProcessor()
    classifier = MotorImageryClassifier()

    # Load demo data. DEMO_DATA_PATHS maps subject_id -> file path, so we
    # must iterate over the *values* (paths) — iterating the dict directly
    # would test the subject-id keys against the filesystem and never find
    # any file. `os` is already imported at module level.
    existing_files = [path for path in DEMO_DATA_PATHS.values() if os.path.exists(path)]
    if existing_files:
        app_state['demo_data'], app_state['demo_labels'] = data_processor.process_files(existing_files)
    else:
        app_state['demo_data'], app_state['demo_labels'] = None, None

    if app_state['demo_data'] is not None:
        # Initialize classifier with proper dimensions (n_chans x n_times)
        classifier.load_model(n_chans=app_state['demo_data'].shape[1], n_times=app_state['demo_data'].shape[2])
        print(f"βœ… Pre-trained model loaded successfully from {classifier.model_path}")
        print(f"Pre-trained Demo: {len(app_state['demo_data'])} samples from {len(existing_files)} subjects")
    else:
        print("⚠️ No demo data loaded - check your .mat files")
    print(f"Available sound classes: {list(sound_manager.current_sound_mapping.keys())}")
except Exception as e:
    print(f"❌ Error during initialization: {e}")
    # Chain the original exception so the startup traceback shows the root cause.
    raise RuntimeError(
        "Cannot initialize app without real EEG data. "
        "Please check your data files and paths."
    ) from e
def get_movement_sounds() -> Dict[str, str]:
    """Return absolute sound-file paths for each mapped main movement.

    Only the five primary motor-imagery movements are considered, and a
    movement is included only when its mapped file actually exists on disk.
    """
    main_movements = ('left_hand', 'right_hand', 'left_leg', 'right_leg', 'tongue')
    sounds: Dict[str, str] = {}
    for movement, sound_file in sound_manager.current_sound_mapping.items():
        if movement not in main_movements or sound_file is None:
            continue
        sound_path = sound_manager.sound_dir / sound_file
        if sound_path.exists():
            # Gradio audio components require an absolute path.
            sounds[movement] = str(sound_path.resolve())
    return sounds
def start_composition():
    """Start the composition process and perform initial classification.

    Activates a composition cycle if one is not running, classifies a single
    EEG epoch, optionally plays the newly added sound layer, and returns a
    10-tuple matching the Gradio outputs wired to this callback:
    (target_text, predicted_text, timer_text, eeg_figure,
     left_hand_audio, right_hand_audio, left_leg_audio, right_leg_audio,
     tongue_audio, status_text).
    """
    global app_state
    # Only start new cycle if not already active
    if not app_state['composition_active']:
        app_state['composition_active'] = True
        sound_manager.start_new_cycle()  # Reset composition only when starting fresh
    if app_state['demo_data'] is None:
        return "❌ No data", "❌ No data", "❌ No data", None, None, None, None, None, None, "No EEG data available"
    # Get current target
    target_movement = sound_manager.get_current_target_movement()
    print(f"DEBUG start_composition: current target = {target_movement}")
    # Check if cycle is complete
    if target_movement == "cycle_complete":
        return "🎡 Cycle Complete!", "🎡 Complete", "Remap sounds to continue", None, None, None, None, None, None, "Cycle complete - remap sounds to continue"
    # Perform initial EEG classification on one simulated real-time epoch
    epoch_data, true_label = data_processor.simulate_real_time_data(
        app_state['demo_data'], app_state['demo_labels'], mode="class_balanced"
    )
    # Classify the epoch
    predicted_class, confidence, probabilities = classifier.predict(epoch_data)
    predicted_name = classifier.class_names[predicted_class]
    # Process classification (sound layer is added only above the threshold)
    result = sound_manager.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD)
    # Always check for DJ mode transition after classification
    dj_transitioned = sound_manager.transition_to_dj_phase()
    print(f"DEBUG: DJ mode transitioned: {dj_transitioned}, current_phase: {sound_manager.current_phase}")
    # Create visualization
    fig = create_eeg_plot(epoch_data, target_movement, predicted_name, confidence, result['sound_added'])
    # Initialize all audio components to None (no sound by default)
    left_hand_audio = None
    right_hand_audio = None
    left_leg_audio = None
    right_leg_audio = None
    tongue_audio = None
    # Debug: Print classification result
    print(f"DEBUG start_composition: predicted={predicted_name}, confidence={confidence:.3f}, sound_added={result['sound_added']}")
    # Only play the sound if it was just added and matches the prediction
    if result['sound_added']:  # might add confidence threshold for sound output here
        sounds = get_movement_sounds()
        print(f"DEBUG: Available sounds: {list(sounds.keys())}")
        # Exactly one of the five players receives the new sound.
        if predicted_name == 'left_hand' and 'left_hand' in sounds:
            left_hand_audio = sounds['left_hand']
            print(f"DEBUG: Setting left_hand_audio to {sounds['left_hand']}")
        elif predicted_name == 'right_hand' and 'right_hand' in sounds:
            right_hand_audio = sounds['right_hand']
            print(f"DEBUG: Setting right_hand_audio to {sounds['right_hand']}")
        elif predicted_name == 'left_leg' and 'left_leg' in sounds:
            left_leg_audio = sounds['left_leg']
            print(f"DEBUG: Setting left_leg_audio to {sounds['left_leg']}")
        elif predicted_name == 'right_leg' and 'right_leg' in sounds:
            right_leg_audio = sounds['right_leg']
            print(f"DEBUG: Setting right_leg_audio to {sounds['right_leg']}")
        elif predicted_name == 'tongue' and 'tongue' in sounds:
            tongue_audio = sounds['tongue']
            print(f"DEBUG: Setting tongue_audio to {sounds['tongue']}")
    else:
        print("DEBUG: No sound added - confidence too low or other issue")
    # Format next target with progress information
    next_target = sound_manager.get_current_target_movement()
    completed_count = len(sound_manager.movements_completed)
    total_count = len(sound_manager.current_movement_sequence)
    if next_target == "cycle_complete":
        target_text = "🎡 Cycle Complete!"
    else:
        target_text = f"🎯 Any Movement ({completed_count}/{total_count} complete) - Use 'Classify Epoch' button to continue"
    predicted_text = f"🧠 Predicted: {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
    # Get composition info
    composition_info = sound_manager.get_composition_info()
    status_text = format_composition_summary(composition_info)
    return (
        target_text,
        predicted_text,
        "2-3 seconds",
        fig,
        left_hand_audio,
        right_hand_audio,
        left_leg_audio,
        right_leg_audio,
        tongue_audio,
        status_text
    )
def stop_composition():
    """Halt the composition loop and disable automatic mode.

    Returns the four status strings shown in the UI after stopping.
    """
    global app_state
    app_state['composition_active'] = False
    app_state['auto_mode'] = False
    stopped_status = (
        "Composition stopped. Click 'Start Composing' to begin again",
        "--",
        "--",
        "Stopped - click Start to resume",
    )
    return stopped_status
def start_automatic_composition():
    """Start automatic composition with continuous classification.

    First trial of the timer-driven mode: activates the cycle, classifies
    one epoch, and routes the result either to the building phase (add a
    sound layer) or the DJ phase (toggle an audio effect). Returns a
    13-tuple matching the Gradio outputs: target/predicted/timer texts,
    user prompt, EEG figure, five per-movement audio paths, status text,
    and two gr.update() visibility toggles for the building/DJ panels.
    """
    global app_state
    # Only start new cycle if not already active
    if not app_state['composition_active']:
        app_state['composition_active'] = True
        app_state['auto_mode'] = True
        sound_manager.start_new_cycle()  # Reset composition only when starting fresh
    if app_state['demo_data'] is None:
        return "❌ No data", "❌ No data", "❌ No data", "❌ No data", None, None, None, None, None, None, "No EEG data available", gr.update(visible=True), gr.update(visible=False)
    # Get current target
    target_movement = sound_manager.get_current_target_movement()
    print(f"DEBUG start_automatic_composition: current target = {target_movement}")
    # Check if cycle is complete
    if target_movement == "cycle_complete":
        # Mark current cycle as complete
        sound_manager.complete_current_cycle()
        # Check if rehabilitation session should end
        if sound_manager.should_end_session():
            app_state['auto_mode'] = False  # Stop automatic mode
            # NOTE(review): this early return has 11 items while the other
            # returns of this function have 13 (no trailing gr.update pair)
            # — confirm the Gradio wiring tolerates the shorter tuple.
            return (
                "πŸŽ‰ Session Complete!",
                "πŸ† Amazing Progress!",
                "Rehabilitation session finished!",
                "🌟 Congratulations! You've created 2 unique brain-music compositions!\n\n" +
                "πŸ’ͺ Your motor imagery skills are improving!\n\n" +
                "🎡 You can review your compositions above, or start a new session anytime.\n\n" +
                "Would you like to continue with more cycles, or take a well-deserved break?",
                None, None, None, None, None, None,
                f"βœ… Session Complete: {sound_manager.completed_cycles}/{sound_manager.max_cycles} compositions finished!"
            )
        else:
            # Start next cycle automatically
            sound_manager.start_new_cycle()
            print("πŸ”„ Cycle completed! Starting new cycle automatically...")
            target_movement = sound_manager.get_current_target_movement()  # Get new target
    # Show user prompt - encouraging start message
    cycle_num = sound_manager.current_cycle
    if cycle_num == 1:
        prompt_text = "🌟 Welcome to your rehabilitation session! Let's start with any movement you can imagine..."
    elif cycle_num == 2:
        prompt_text = "πŸ’ͺ Excellent work on your first composition! Ready for composition #2?"
    else:
        prompt_text = "🧠 Let's continue - imagine any movement now..."
    # Perform initial EEG classification on one simulated real-time epoch
    epoch_data, true_label = data_processor.simulate_real_time_data(
        app_state['demo_data'], app_state['demo_labels'], mode="class_balanced"
    )
    # Classify the epoch
    predicted_class, confidence, probabilities = classifier.predict(epoch_data)
    predicted_name = classifier.class_names[predicted_class]
    # Handle DJ effects or building phase
    if sound_manager.current_phase == "dj_effects" and confidence > CONFIDENCE_THRESHOLD:
        # DJ Effects Mode - toggle effects instead of adding sounds.
        # Normalize the dj_result into the same dict shape as the building
        # path so downstream code can treat both uniformly.
        dj_result = sound_manager.toggle_dj_effect(predicted_name)
        result = {
            'sound_added': dj_result['effect_applied'],
            'mixed_composition': dj_result.get('mixed_composition'),
            'effect_name': dj_result.get('effect_name', ''),
            'effect_status': dj_result.get('effect_status', '')
        }
    else:
        # Building Mode - process classification normally
        result = sound_manager.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD)
    # Create visualization
    fig = create_eeg_plot(epoch_data, target_movement, predicted_name, confidence, result['sound_added'])
    # Initialize all audio components to None (no sound by default)
    left_hand_audio = None
    right_hand_audio = None
    left_leg_audio = None
    right_leg_audio = None
    tongue_audio = None
    # Debug: Print classification result
    print(f"DEBUG start_automatic_composition: predicted={predicted_name}, confidence={confidence:.3f}, sound_added={result['sound_added']}")
    # Handle audio display based on current phase
    if sound_manager.current_phase == "dj_effects":
        # DJ Effects Phase - continue showing individual sounds in their players
        sounds = get_movement_sounds()
        # Always check for phase transition to DJ effects after each classification
        sound_manager.transition_to_dj_phase()
        completed_movements = sound_manager.movements_completed
        # Display each completed movement sound in its respective player (same as building mode)
        if 'left_hand' in completed_movements and 'left_hand' in sounds:
            left_hand_audio = sounds['left_hand']
            print(f"DEBUG DJ: Left hand playing: {sounds['left_hand']}")
        if 'right_hand' in completed_movements and 'right_hand' in sounds:
            right_hand_audio = sounds['right_hand']
            print(f"DEBUG DJ: Right hand playing: {sounds['right_hand']}")
        if 'left_leg' in completed_movements and 'left_leg' in sounds:
            left_leg_audio = sounds['left_leg']
            print(f"DEBUG DJ: Left leg playing: {sounds['left_leg']}")
        if 'right_leg' in completed_movements and 'right_leg' in sounds:
            right_leg_audio = sounds['right_leg']
            print(f"DEBUG DJ: Right leg playing: {sounds['right_leg']}")
        if 'tongue' in completed_movements and 'tongue' in sounds:
            tongue_audio = sounds['tongue']
            print(f"DEBUG DJ: Tongue playing: {sounds['tongue']}")
        print(f"DEBUG DJ: {len(completed_movements)} individual sounds playing with effects applied")
    else:
        # Building Phase - create and show layered composition
        sounds = get_movement_sounds()
        completed_movements = sound_manager.movements_completed
        print(f"DEBUG: Available sounds: {list(sounds.keys())}")
        print(f"DEBUG: Completed movements: {completed_movements}")
        # Display individual sounds in their respective players for layered composition
        # All completed movement sounds will play simultaneously, creating natural layering
        if len(completed_movements) > 0:
            print(f"DEBUG: Showing individual sounds that will layer together: {list(completed_movements)}")
            # Display each completed movement sound in its respective player
            if 'left_hand' in completed_movements and 'left_hand' in sounds:
                left_hand_audio = sounds['left_hand']
                print(f"DEBUG: Left hand playing: {sounds['left_hand']}")
            if 'right_hand' in completed_movements and 'right_hand' in sounds:
                right_hand_audio = sounds['right_hand']
                print(f"DEBUG: Right hand playing: {sounds['right_hand']}")
            if 'left_leg' in completed_movements and 'left_leg' in sounds:
                left_leg_audio = sounds['left_leg']
                print(f"DEBUG: Left leg playing: {sounds['left_leg']}")
            if 'right_leg' in completed_movements and 'right_leg' in sounds:
                right_leg_audio = sounds['right_leg']
                print(f"DEBUG: Right leg playing: {sounds['right_leg']}")
            if 'tongue' in completed_movements and 'tongue' in sounds:
                tongue_audio = sounds['tongue']
                print(f"DEBUG: Tongue playing: {sounds['tongue']}")
            print(f"DEBUG: {len(completed_movements)} individual sounds will play together creating layered composition")
    # Check for phase transition to DJ effects
    completed_count = len(sound_manager.movements_completed)
    total_count = len(sound_manager.current_movement_sequence)
    # Transition to DJ effects if all movements completed but still in building phase
    if completed_count >= total_count and sound_manager.current_phase == "building":
        sound_manager.transition_to_dj_phase()
    # Format display based on current phase
    if sound_manager.current_phase == "dj_effects":
        target_text = "🎧 DJ Mode Active - Use movements to control effects!"
    else:
        next_target = sound_manager.get_current_target_movement()
        if next_target == "cycle_complete":
            target_text = "🎡 Composition Complete!"
        else:
            target_text = f"🎯 Building Composition ({completed_count}/{total_count} layers)"
    # Update display text based on phase
    if sound_manager.current_phase == "dj_effects":
        if result.get('effect_name') and result.get('effect_status'):
            predicted_text = f"πŸŽ›οΈ {result['effect_name']}: {result['effect_status']}"
        else:
            predicted_text = f"🧠 Detected: {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
        timer_text = "🎧 DJ Mode - Effects updating every 3 seconds..."
    else:
        predicted_text = f"🧠 Predicted: {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
        timer_text = "⏱️ Next trial in 2-3 seconds..."
    # Get composition info
    composition_info = sound_manager.get_composition_info()
    status_text = format_composition_summary(composition_info)
    # Phase-based instruction visibility
    building_visible = sound_manager.current_phase == "building"
    dj_visible = sound_manager.current_phase == "dj_effects"
    return (
        target_text,
        predicted_text,
        timer_text,
        prompt_text,
        fig,
        left_hand_audio,
        right_hand_audio,
        left_leg_audio,
        right_leg_audio,
        tongue_audio,
        status_text,
        gr.update(visible=building_visible),  # building_instructions
        gr.update(visible=dj_visible)  # dj_instructions
    )
def manual_classify():
    """Run one classification on a random epoch for manual testing.

    Pulls a single epoch from the demo data, classifies it, and returns the
    10 values for the manual-testing tab: target/predicted/status texts,
    EEG figure, a timestamped results-log entry, and five non-autoplaying
    sound-preview paths. Does not touch the composition state.
    """
    global app_state
    if app_state['demo_data'] is None:
        return "❌ No data", "❌ No data", "Manual mode", None, "No EEG data available", None, None, None, None, None
    # Get EEG data sample
    epoch_data, true_label = data_processor.simulate_real_time_data(
        app_state['demo_data'], app_state['demo_labels'], mode="class_balanced"
    )
    # Classify the epoch
    predicted_class, confidence, probabilities = classifier.predict(epoch_data)
    predicted_name = classifier.class_names[predicted_class]
    # Create visualization (without composition context)
    fig = create_eeg_plot(epoch_data, "manual_test", predicted_name, confidence, False)
    # Format results
    target_text = "🎯 Manual Test Mode"
    predicted_text = f"🧠 {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
    # Update results log (`time` is already imported at module level)
    timestamp = time.strftime("%H:%M:%S")
    result_entry = f"[{timestamp}] Predicted: {predicted_name.replace('_', ' ').title()} (confidence: {confidence:.3f})"
    # Get sound files for preview (no autoplay)
    sounds = get_movement_sounds()
    left_hand_audio = sounds.get('left_hand', None)
    right_hand_audio = sounds.get('right_hand', None)
    left_leg_audio = sounds.get('left_leg', None)
    right_leg_audio = sounds.get('right_leg', None)
    tongue_audio = sounds.get('tongue', None)
    return (
        target_text,
        predicted_text,
        "Manual mode - click button to classify",
        fig,
        result_entry,
        left_hand_audio,
        right_hand_audio,
        left_leg_audio,
        right_leg_audio,
        tongue_audio
    )
def clear_manual():
    """Reset every manual-testing widget to its initial state.

    The five trailing Nones clear the per-movement audio preview players.
    """
    cleared = (
        "🎯 Manual Test Mode",
        "--",
        "Manual mode",
        None,
        "Manual classification results cleared...",
    )
    return cleared + (None,) * 5
def continue_automatic_composition():
    """Continue automatic composition - called for subsequent trials.

    Timer-driven follow-up to start_automatic_composition(): classifies one
    epoch, routes it to the building phase (add a sound layer) or the DJ
    phase (toggle an audio effect), and returns a 13-tuple matching the
    Gradio outputs: target/predicted/timer texts, user prompt, EEG figure,
    five per-movement audio paths, status text, and two gr.update()
    visibility toggles for the building/DJ instruction panels.
    """
    global app_state
    if not app_state['composition_active'] or not app_state['auto_mode']:
        return "πŸ›‘ Stopped", "--", "--", "Automatic composition stopped", None, None, None, None, None, None, "Stopped", gr.update(visible=True), gr.update(visible=False)
    if app_state['demo_data'] is None:
        return "❌ No data", "❌ No data", "❌ No data", "❌ No data", None, None, None, None, None, None, "No EEG data available", gr.update(visible=True), gr.update(visible=False)
    # Get current target
    target_movement = sound_manager.get_current_target_movement()
    print(f"DEBUG continue_automatic_composition: current target = {target_movement}")
    # Check if cycle is complete
    if target_movement == "cycle_complete":
        # Mark current cycle as complete
        sound_manager.complete_current_cycle()
        # Check if rehabilitation session should end
        if sound_manager.should_end_session():
            app_state['auto_mode'] = False  # Stop automatic mode
            return (
                "πŸŽ‰ Session Complete!",
                "πŸ† Amazing Progress!",
                "Rehabilitation session finished!",
                "🌟 Congratulations! You've created 2 unique brain-music compositions!\n\n" +
                "πŸ’ͺ Your motor imagery skills are improving!\n\n" +
                "🎡 You can review your compositions above, or start a new session anytime.\n\n" +
                "Would you like to continue with more cycles, or take a well-deserved break?",
                None, None, None, None, None, None,
                f"βœ… Session Complete: {sound_manager.completed_cycles}/{sound_manager.max_cycles} compositions finished!",
                gr.update(visible=True), gr.update(visible=False)
            )
        else:
            # Start next cycle automatically
            sound_manager.start_new_cycle()
            print("πŸ”„ Cycle completed! Starting new cycle automatically...")
            target_movement = sound_manager.get_current_target_movement()  # Get new target
    # Show next user prompt - rehabilitation-focused messaging
    prompts = [
        "πŸ’ͺ Great work! Imagine your next movement...",
        "🎯 You're doing amazing! Focus and imagine any movement...",
        "✨ Excellent progress! Ready for the next movement?",
        "🌟 Keep it up! Concentrate and imagine now...",
        "πŸ† Fantastic! Next trial - imagine any movement..."
    ]
    import random
    prompt_text = random.choice(prompts)
    # Add progress encouragement
    completed_count = len(sound_manager.movements_completed)
    total_count = len(sound_manager.current_movement_sequence)
    if completed_count > 0:
        prompt_text += f" ({completed_count}/{total_count} movements completed this cycle)"
    # Perform EEG classification on one simulated real-time epoch
    epoch_data, true_label = data_processor.simulate_real_time_data(
        app_state['demo_data'], app_state['demo_labels'], mode="class_balanced"
    )
    # Classify the epoch
    predicted_class, confidence, probabilities = classifier.predict(epoch_data)
    predicted_name = classifier.class_names[predicted_class]
    # Handle DJ effects or building phase
    if sound_manager.current_phase == "dj_effects" and confidence > CONFIDENCE_THRESHOLD:
        # DJ Effects Mode - toggle effects instead of adding sounds.
        # 'sound_added' carries the effect-toggle flag so downstream code can
        # treat both phases uniformly.
        dj_result = sound_manager.toggle_dj_effect(predicted_name)
        result = {
            'sound_added': dj_result['effect_applied'],
            'mixed_composition': dj_result.get('mixed_composition'),
            'effect_name': dj_result.get('effect_name', ''),
            'effect_status': dj_result.get('effect_status', '')
        }
    else:
        # Building Mode - process classification normally
        result = sound_manager.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD)
    # Check if we should transition to DJ phase
    completed_count = len(sound_manager.movements_completed)
    if completed_count >= 5 and sound_manager.current_phase == "building":
        if sound_manager.transition_to_dj_phase():
            print(f"DEBUG: Successfully transitioned to DJ phase with {completed_count} completed movements")
    # Create visualization
    fig = create_eeg_plot(epoch_data, target_movement, predicted_name, confidence, result['sound_added'])
    # Initialize all audio components to None (no sound by default)
    left_hand_audio = None
    right_hand_audio = None
    left_leg_audio = None
    right_leg_audio = None
    tongue_audio = None
    # Handle audio differently based on phase
    if sound_manager.current_phase == "dj_effects":
        # DJ Mode: Show only the full mixed track and route effects to it.
        # Individual movement players stay at their initialized None value.
        mixed_track = sound_manager.get_current_mixed_composition()
        print(f"DEBUG continue: DJ Mode - Showing full mixed track: {mixed_track}")
        # The mixed_track will be shown in a dedicated gr.Audio component in the UI (update UI accordingly)
    else:
        # Building Mode: Display individual sounds in their respective players for layered composition
        # All completed movement sounds will play simultaneously, creating natural layering
        sounds = get_movement_sounds()
        completed_movements = sound_manager.movements_completed
        print(f"DEBUG continue: Available sounds: {list(sounds.keys())}")
        print(f"DEBUG continue: Completed movements: {completed_movements}")
        if len(completed_movements) > 0:
            print(f"DEBUG continue: Showing individual sounds that will layer together: {list(completed_movements)}")
            # Display each completed movement sound in its respective player
            if 'left_hand' in completed_movements and 'left_hand' in sounds:
                left_hand_audio = sounds['left_hand']
                print(f"DEBUG continue: Left hand playing: {sounds['left_hand']}")
            if 'right_hand' in completed_movements and 'right_hand' in sounds:
                right_hand_audio = sounds['right_hand']
                print(f"DEBUG continue: Right hand playing: {sounds['right_hand']}")
            if 'left_leg' in completed_movements and 'left_leg' in sounds:
                left_leg_audio = sounds['left_leg']
                print(f"DEBUG continue: Left leg playing: {sounds['left_leg']}")
            if 'right_leg' in completed_movements and 'right_leg' in sounds:
                right_leg_audio = sounds['right_leg']
                print(f"DEBUG continue: Right leg playing: {sounds['right_leg']}")
            if 'tongue' in completed_movements and 'tongue' in sounds:
                tongue_audio = sounds['tongue']
                print(f"DEBUG continue: Tongue playing: {sounds['tongue']}")
            print(f"DEBUG continue: {len(completed_movements)} individual sounds will play together creating layered composition")
    # Format display with progress information
    completed_count = len(sound_manager.movements_completed)
    total_count = len(sound_manager.current_movement_sequence)
    if sound_manager.current_phase == "dj_effects":
        target_text = f"🎧 DJ Mode - Control Effects with Movements"
        predicted_text = f"🧠 Predicted: {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
        # Bug fix: the normalized result dict stores the toggle flag under
        # 'sound_added' (there is no 'effect_applied' key), so the previous
        # result.get('effect_applied') check could never show the effect.
        if result.get('sound_added'):
            effect_name = result.get('effect_name', '')
            effect_status = result.get('effect_status', '')
            # Bug fix: restore the DJ-controller emoji lost to a replacement
            # character (matches the label used elsewhere in this file).
            timer_text = f"πŸŽ›οΈ {effect_name}: {effect_status}"
        else:
            timer_text = "🎡 Move to control effects..."
    else:
        # Bug fix: dropped a stray replacement character before the target emoji.
        target_text = f"🎯 Any Movement ({completed_count}/{total_count} complete)"
        predicted_text = f"🧠 Predicted: {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
        timer_text = "⏱️ Next trial in 2-3 seconds..." if app_state['auto_mode'] else "Stopped"
    # Get composition info
    composition_info = sound_manager.get_composition_info()
    status_text = format_composition_summary(composition_info)
    # Phase-based instruction visibility
    building_visible = sound_manager.current_phase == "building"
    dj_visible = sound_manager.current_phase == "dj_effects"
    return (
        target_text,
        predicted_text,
        timer_text,
        prompt_text,
        fig,
        left_hand_audio,
        right_hand_audio,
        left_leg_audio,
        right_leg_audio,
        tongue_audio,
        status_text,
        gr.update(visible=building_visible),  # building_instructions
        gr.update(visible=dj_visible)  # dj_instructions
    )
def classify_epoch():
    """Classify a single EEG epoch and update composition.

    Manual-step counterpart of the automatic loop: requires an active
    composition, classifies one epoch, may add a sound layer, and returns
    a 10-tuple for the UI: target/predicted/timer texts, EEG figure, five
    per-movement audio paths, and status text.
    """
    global app_state
    if not app_state['composition_active']:
        return "❌ Not active", "❌ Not active", "❌ Not active", None, None, None, None, None, None, "Click 'Start Composing' first"
    if app_state['demo_data'] is None:
        return "❌ No data", "❌ No data", "❌ No data", None, None, None, None, None, None, "No EEG data available"
    # Get current target
    target_movement = sound_manager.get_current_target_movement()
    print(f"DEBUG classify_epoch: current target = {target_movement}")
    if target_movement == "cycle_complete":
        return "🎡 Cycle Complete!", "🎡 Complete", "Remap sounds to continue", None, None, None, None, None, None, "Cycle complete - remap sounds to continue"
    # Get EEG data sample
    epoch_data, true_label = data_processor.simulate_real_time_data(
        app_state['demo_data'], app_state['demo_labels'], mode="class_balanced"
    )
    # Classify the epoch
    predicted_class, confidence, probabilities = classifier.predict(epoch_data)
    predicted_name = classifier.class_names[predicted_class]
    # Process classification (sound layer is added only above the threshold)
    result = sound_manager.process_classification(predicted_name, confidence, CONFIDENCE_THRESHOLD)
    # Check if we should transition to DJ phase (all 5 movements done)
    completed_count = len(sound_manager.movements_completed)
    if completed_count >= 5 and sound_manager.current_phase == "building":
        if sound_manager.transition_to_dj_phase():
            print(f"DEBUG: Successfully transitioned to DJ phase with {completed_count} completed movements")
    # Create visualization
    fig = create_eeg_plot(epoch_data, target_movement, predicted_name, confidence, result['sound_added'])
    # Initialize all audio components to None (no sound by default)
    left_hand_audio = None
    right_hand_audio = None
    left_leg_audio = None
    right_leg_audio = None
    tongue_audio = None
    # Always assign all completed movement sounds to their respective audio slots
    sounds = get_movement_sounds()
    completed_movements = sound_manager.movements_completed
    if 'left_hand' in completed_movements and 'left_hand' in sounds:
        left_hand_audio = sounds['left_hand']
    if 'right_hand' in completed_movements and 'right_hand' in sounds:
        right_hand_audio = sounds['right_hand']
    if 'left_leg' in completed_movements and 'left_leg' in sounds:
        left_leg_audio = sounds['left_leg']
    if 'right_leg' in completed_movements and 'right_leg' in sounds:
        right_leg_audio = sounds['right_leg']
    if 'tongue' in completed_movements and 'tongue' in sounds:
        tongue_audio = sounds['tongue']
    # Format next target
    next_target = sound_manager.get_current_target_movement()
    target_text = f"🎯 Target: {next_target.replace('_', ' ').title()}" if next_target != "cycle_complete" else "🎡 Cycle Complete!"
    predicted_text = f"🧠 Predicted: {predicted_name.replace('_', ' ').title()} ({confidence:.2f})"
    # Get composition info
    composition_info = sound_manager.get_composition_info()
    status_text = format_composition_summary(composition_info)
    return (
        target_text,
        predicted_text,
        "2-3 seconds",
        fig,
        left_hand_audio,
        right_hand_audio,
        left_leg_audio,
        right_leg_audio,
        tongue_audio,
        status_text
    )
def create_eeg_plot(eeg_data: np.ndarray, target_movement: str, predicted_name: str, confidence: float, sound_added: bool, sampling_rate: float = 200.0) -> plt.Figure:
    """Create a 2x2 EEG channel plot annotated with the classification result.

    Args:
        eeg_data: EEG epoch of shape (n_channels, n_times); only the first
            four channels are drawn.
        target_movement: Movement the user was asked to imagine.
        predicted_name: Movement predicted by the classifier.
        confidence: Classifier confidence used in the title.
        sound_added: Whether this prediction added a sound layer; traces are
            drawn green when True, blue otherwise.
        sampling_rate: Sampling rate in Hz used to build the time axis.
            Defaults to 200 Hz (the rate the demo recordings use), so
            existing callers are unaffected.

    Returns:
        The annotated matplotlib figure (caller owns/closes it).
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 8))
    axes = axes.flatten()

    time_points = np.arange(eeg_data.shape[1]) / sampling_rate
    channel_names = ['C3', 'C4', 'T3', 'T4']  # Motor cortex channels
    color = 'green' if sound_added else 'blue'  # hoisted: same for every channel

    for i in range(min(4, eeg_data.shape[0])):
        axes[i].plot(time_points, eeg_data[i], color=color, linewidth=1)
        if i < len(channel_names):
            axes[i].set_title(f'{channel_names[i]} (Ch {i+1})')
        else:
            axes[i].set_title(f'Channel {i+1}')
        axes[i].set_xlabel('Time (s)')
        axes[i].set_ylabel('Amplitude (Β΅V)')
        axes[i].grid(True, alpha=0.3)

    # Add overall title with status
    status = "βœ“ SOUND ADDED" if sound_added else "β—‹ No sound"
    title = f"Target: {target_movement.replace('_', ' ').title()} | Predicted: {predicted_name.replace('_', ' ').title()} ({confidence:.2f}) | {status}"
    fig.suptitle(title, fontsize=12, fontweight='bold')
    fig.tight_layout()
    return fig
def format_composition_summary(composition_info: Dict) -> str:
    """Render the composition layers, grouped by cycle, as display text.

    Cycles are shown 1-based; each layer line shows the movement name and
    its classification confidence.
    """
    layers_by_cycle = composition_info.get('layers_by_cycle')
    if not layers_by_cycle:
        return "No composition layers yet"

    lines: List[str] = []
    for cycle, layers in layers_by_cycle.items():
        lines.append(f"Cycle {cycle + 1}: {len(layers)} layers")
        for layer in layers:
            movement = layer.get('movement', 'unknown')
            confidence = layer.get('confidence', 0)
            lines.append(f" β€’ {movement.replace('_', ' ').title()} ({confidence:.2f})")
    return "\n".join(lines) if lines else "No composition layers"
# Create Gradio interface
def create_interface():
with gr.Blocks(title="EEG Motor Imagery Music Composer", theme=gr.themes.Soft()) as demo:
gr.Markdown("# 🧠🎡 EEG Motor Imagery Rehabilitation Composer")
gr.Markdown("**Therapeutic Brain-Computer Interface for Motor Recovery**\n\nCreate beautiful music compositions using your brain signals! This rehabilitation tool helps strengthen motor imagery skills while creating personalized musical pieces.")
with gr.Tabs() as tabs:
# Main Composition Tab
with gr.TabItem("🎡 Automatic Composition"):
with gr.Row():
# Left side - Task and EEG information
with gr.Column(scale=2):
# Task instructions - Building Phase
with gr.Group() as building_instructions:
gr.Markdown("### 🎯 Rehabilitation Session Instructions")
gr.Markdown("""
**Motor Imagery Training:**
- **Imagine** opening or closing your **right or left hand**
- **Visualize** briefly moving your **right or left leg or foot**
- **Think about** pronouncing **"L"** with your tongue
- **Rest state** (no movement imagination)
*🌟 Each successful imagination creates a musical layer!*
**Session Structure:** Build composition, then control DJ effects
*Press Start to begin your personalized rehabilitation session*
""")
# DJ Instructions - Effects Phase (initially hidden)
with gr.Group(visible=False) as dj_instructions:
gr.Markdown("### 🎧 DJ Controller Mode")
gr.Markdown("""
**πŸŽ‰ Composition Complete! You are now the DJ!**
**Use the same movements to control audio effects:**
- πŸ‘ˆ **Left Hand**: Volume Fade On/Off
- πŸ‘‰ **Right Hand**: High Pass Filter On/Off
- 🦡 **Left Leg**: Reverb Effect On/Off
- 🦡 **Right Leg**: Low Pass Filter On/Off
- πŸ‘… **Tongue**: Bass Boost On/Off
*πŸŽ›οΈ Each movement toggles an effect - Mix your creation!*
""")
# Start button
with gr.Row():
start_btn = gr.Button("🎡 Start Composing", variant="primary", size="lg")
continue_btn = gr.Button("⏭️ Continue", variant="primary", size="lg", visible=False)
stop_btn = gr.Button("πŸ›‘ Stop", variant="secondary", size="lg")
# Session completion options (shown after 2 cycles)
with gr.Row(visible=False) as session_complete_row:
new_session_btn = gr.Button("πŸ”„ Start New Session", variant="primary", size="lg")
extend_session_btn = gr.Button("βž• Continue Session", variant="secondary", size="lg")
# Timer for automatic progression (hidden from user)
timer = gr.Timer(value=3.0, active=False) # 3 second intervals
# User prompt display
user_prompt = gr.Textbox(label="πŸ’­ User Prompt", interactive=False, value="Click 'Start Composing' to begin",
elem_classes=["prompt-display"])
# Current status
with gr.Row():
target_display = gr.Textbox(label="🎯 Current Target", interactive=False, value="Ready to start")
predicted_display = gr.Textbox(label="🧠 Predicted", interactive=False, value="--")
timer_display = gr.Textbox(label="⏱️ Next Trial In", interactive=False, value="--")
eeg_plot = gr.Plot(label="EEG Data Visualization")
# Right side - Compositional layers
with gr.Column(scale=1):
gr.Markdown("### 🎡 Compositional Layers")
# Show 5 movement sounds
left_hand_sound = gr.Audio(label="πŸ‘ˆ Left Hand", interactive=False, autoplay=True, visible=True)
right_hand_sound = gr.Audio(label="πŸ‘‰ Right Hand", interactive=False, autoplay=True, visible=True)
left_leg_sound = gr.Audio(label="🦡 Left Leg", interactive=False, autoplay=True, visible=True)
right_leg_sound = gr.Audio(label="🦡 Right Leg", interactive=False, autoplay=True, visible=True)
tongue_sound = gr.Audio(label="πŸ‘… Tongue", interactive=False, autoplay=True, visible=True)
# Composition status
composition_status = gr.Textbox(label="Composition Status", interactive=False, lines=5)
# Manual Testing Tab
with gr.TabItem("🧠 Manual Testing"):
with gr.Row():
with gr.Column(scale=2):
gr.Markdown("### πŸ”¬ Manual EEG Classification Testing")
gr.Markdown("Use this tab to manually test the EEG classifier without the composition system.")
with gr.Row():
classify_btn = gr.Button("🧠 Classify Single Epoch", variant="primary")
clear_btn = gr.Button("�️ Clear", variant="secondary")
# Manual status displays
manual_target_display = gr.Textbox(label="🎯 Current Target", interactive=False, value="Ready")
manual_predicted_display = gr.Textbox(label="🧠 Predicted", interactive=False, value="--")
manual_timer_display = gr.Textbox(label="⏱️ Status", interactive=False, value="Manual mode")
manual_eeg_plot = gr.Plot(label="EEG Data Visualization")
with gr.Column(scale=1):
gr.Markdown("### πŸ“Š Classification Results")
manual_results = gr.Textbox(label="Results Log", interactive=False, lines=10, value="Manual classification results will appear here...")
# Individual sound previews (no autoplay in manual mode)
gr.Markdown("### πŸ”Š Sound Preview")
manual_left_hand_sound = gr.Audio(label="πŸ‘ˆ Left Hand", interactive=False, autoplay=False, visible=True)
manual_right_hand_sound = gr.Audio(label="πŸ‘‰ Right Hand", interactive=False, autoplay=False, visible=True)
manual_left_leg_sound = gr.Audio(label="🦡 Left Leg", interactive=False, autoplay=False, visible=True)
manual_right_leg_sound = gr.Audio(label="🦡 Right Leg", interactive=False, autoplay=False, visible=True)
manual_tongue_sound = gr.Audio(label="πŸ‘… Tongue", interactive=False, autoplay=False, visible=True)
# Session management functions
def start_new_session():
    """Reset all progress and begin a completely new rehabilitation session.

    Clears the shared SoundManager's cycle counters, completed-movement set
    and accumulated composition layers, then delegates to
    start_automatic_composition() for the initial UI state.

    Returns:
        A 16-tuple matching the outputs wired to ``new_session_btn``: the 13
        composition outputs (target/predicted/timer displays, user prompt,
        EEG plot, five audio widgets, composition status, two instruction
        panels), followed by ``gr.update`` values that show the continue
        button, activate the auto-advance timer and hide the
        session-complete row.
    """
    # Wipe per-session progress tracked on the shared SoundManager.
    # (No `global` statement needed: we only mutate attributes, never rebind.)
    sound_manager.completed_cycles = 0
    sound_manager.current_cycle = 0
    sound_manager.movements_completed = set()
    sound_manager.composition_layers = []
    # Start fresh session and forward its 13 UI outputs unchanged.
    result = start_automatic_composition()
    return (
        *result[:13],
        gr.update(visible=True),   # continue_btn - show it
        gr.update(active=True),    # timer - activate it
        gr.update(visible=False),  # session_complete_row - hide it
    )
def extend_current_session():
    """Continue the current session beyond the configured cycle limit.

    Grants two additional cycles on the shared SoundManager, then resumes
    the session via continue_automatic_composition().

    Returns:
        A 16-tuple matching the outputs wired to ``extend_session_btn``:
        the 13 composition outputs, followed by ``gr.update`` values that
        show the continue button, re-activate the auto-advance timer and
        hide the session-complete row.
    """
    sound_manager.max_cycles += 2  # Add 2 more cycles
    # Continue with current session and forward its 13 UI outputs unchanged.
    result = continue_automatic_composition()
    return (
        *result[:13],
        gr.update(visible=True),   # continue_btn - show it
        gr.update(active=True),    # timer - activate it
        gr.update(visible=False),  # session_complete_row - hide it
    )
# Wrapper functions for timer control
def start_with_timer():
    """Start the composition and turn on the auto-advance timer.

    Returns a 15-tuple: the 13 outputs produced by
    start_automatic_composition() (displays, prompt, plot, five audio
    widgets, status, two instruction panels) followed by updates that
    reveal the continue button and activate the timer.
    """
    outputs = start_automatic_composition()
    # Forward the composition outputs unchanged, then adjust the controls.
    show_continue = gr.update(visible=True)   # continue_btn - show it
    run_timer = gr.update(active=True)        # timer - activate it
    return (*outputs[:13], show_continue, run_timer)
def continue_with_timer():
    """Continue the composition and manage the auto-advance timer state.

    Calls continue_automatic_composition() and inspects the target-display
    text (``result[0]``) to decide how the timer and the session-complete
    row should be updated:

    - contains "πŸŽ‰ Session Complete!": the rehabilitation session finished;
      stop the timer and reveal the session-completion options.
    - contains "🎡 Cycle Complete!": a cycle (but not the whole session)
      finished; stop the timer, keep the options hidden.
    - otherwise: still mid-cycle; keep the timer active so the next tick
      continues automatically.

    Returns:
        A 15-tuple: the 13 composition outputs unchanged, followed by the
        timer update and the session_complete_row update. The three former
        branches only ever differed in these last two elements, so they are
        computed once here instead of duplicating the whole tuple.
    """
    result = continue_automatic_composition()
    target_text = result[0]
    if "πŸŽ‰ Session Complete!" in target_text:
        # Session finished: pause and show session completion options.
        timer_state = gr.update(active=False)
        complete_row = gr.update(visible=True)
    elif "🎡 Cycle Complete!" in target_text:
        # Composition cycle complete (old logic for other cases): stop timer.
        timer_state = gr.update(active=False)
        complete_row = gr.update(visible=False)
    else:
        # Keep timer active for the next iteration.
        timer_state = gr.update(active=True)
        complete_row = gr.update(visible=False)
    return (*result[:13], timer_state, complete_row)
# Event handlers for automatic composition tab
# Start button: kicks off the session; outputs are the 13 composition
# widgets plus the continue button and timer updates returned by the handler.
start_btn.click(
fn=start_with_timer,
outputs=[target_display, predicted_display, timer_display, user_prompt, eeg_plot,
left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, tongue_sound, composition_status,
building_instructions, dj_instructions, continue_btn, timer]
)
# Manual continue: same handler and outputs as the automatic timer tick below.
continue_btn.click(
fn=continue_with_timer,
outputs=[target_display, predicted_display, timer_display, user_prompt, eeg_plot,
left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, tongue_sound, composition_status,
building_instructions, dj_instructions, timer, session_complete_row]
)
# Timer automatically triggers continuation
timer.tick(
fn=continue_with_timer,
outputs=[target_display, predicted_display, timer_display, user_prompt, eeg_plot,
left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, tongue_sound, composition_status,
building_instructions, dj_instructions, timer, session_complete_row]
)
# Session completion event handlers
# Offered once a session completes: either restart from scratch...
new_session_btn.click(
fn=start_new_session,
outputs=[target_display, predicted_display, timer_display, user_prompt, eeg_plot,
left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, tongue_sound, composition_status,
building_instructions, dj_instructions, continue_btn, timer, session_complete_row]
)
# ...or extend the finished session by two more cycles.
extend_session_btn.click(
fn=extend_current_session,
outputs=[target_display, predicted_display, timer_display, user_prompt, eeg_plot,
left_hand_sound, right_hand_sound, left_leg_sound, right_leg_sound, tongue_sound, composition_status,
building_instructions, dj_instructions, continue_btn, timer, session_complete_row]
)
def stop_with_timer():
    """Stop the composition and switch the auto-advance timer off.

    Returns a 6-tuple: the first four outputs of stop_composition()
    (target, predicted and timer displays plus the user prompt) followed
    by updates that hide the continue button and deactivate the timer.
    """
    status = stop_composition()
    hidden_continue = gr.update(visible=False)  # continue_btn - hide it
    idle_timer = gr.update(active=False)        # timer - deactivate it
    return (*status[:4], hidden_continue, idle_timer)
# Stop button: halts the composition, hides the continue button, stops timer.
stop_btn.click(
fn=stop_with_timer,
outputs=[target_display, predicted_display, timer_display, user_prompt, continue_btn, timer]
)
# Event handlers for manual testing tab
# Classify one epoch on demand and surface the prediction, plot, log and
# per-movement preview audio (no autoplay in manual mode).
classify_btn.click(
fn=manual_classify,
outputs=[manual_target_display, manual_predicted_display, manual_timer_display, manual_eeg_plot, manual_results,
manual_left_hand_sound, manual_right_hand_sound, manual_left_leg_sound, manual_right_leg_sound, manual_tongue_sound]
)
# Reset the manual tab's displays, plot, log and audio previews.
clear_btn.click(
fn=clear_manual,
outputs=[manual_target_display, manual_predicted_display, manual_timer_display, manual_eeg_plot, manual_results,
manual_left_hand_sound, manual_right_hand_sound, manual_left_leg_sound, manual_right_leg_sound, manual_tongue_sound]
)
# Note: No auto-loading of sounds to prevent playing all sounds on startup
# Hand the assembled Blocks app back to the caller (end of create_interface).
return demo
if __name__ == "__main__":
    # Build the Gradio UI and serve it on all interfaces at port 7867.
    interface = create_interface()
    interface.launch(server_name="0.0.0.0", server_port=7867)