# harmonic-catalyst / emotional_engine.py
# NOTE(review): the lines above/below this header were repository-page residue
# (uploader "RAM2118", commit 5d29a69) captured during download, not code.
"""
Emotional Context Engine for Harmonic Catalyst
Version: 1.1.0 (Phase 1 - Wired to Generation)
"""
from dataclasses import dataclass
from typing import List
@dataclass
class EmotionalContext:
    """Stores emotional parameters for a section.

    Consumed by EmotionalAdapter to reshape left/right-hand voicings.
    """
    # Intensity in 0.0-1.0; adapters thin voicings below 0.3 and add
    # octave doublings above 0.7.
    energy: float = 0.5
    # Voicing fullness: 'Sparse' | 'Medium' | 'Thick'.
    density: str = 'Medium'
    # Part function: 'Support' | 'Lead' | 'Ambient'.
    role: str = 'Support'
    # Voice-leading style: 'Static' | 'Flowing' | 'Agitated'
    # (see EmotionalAdapter.get_voice_leading_settings).
    movement: str = 'Flowing'
# Section types offered in the UI dropdown.
SECTION_TYPES = [
    'Intro',
    'Verse',
    'Pre-Chorus',
    'Chorus',
    'Post-Chorus',
    'Bridge',
    'Breakdown',
    'Drop',
    'Outro',
    'Custom'
]
# Sections that get an auto-appended number ("Verse 1", "Verse 2", ...)
# via SectionNamer.generate_name.
NUMBERED_SECTIONS = ['Verse', 'Pre-Chorus', 'Chorus', 'Post-Chorus', 'Drop']
# Sections that keep their bare type name (no number appended).
SINGLE_SECTIONS = ['Intro', 'Bridge', 'Breakdown', 'Outro']
# Emotional presets: named bundles of EmotionalContext field values plus a
# human-readable description. Keys are referenced by SECTION_TYPE_DEFAULTS.
EMOTIONAL_PRESETS = {
    "Sparse Intro": {
        'energy': 0.1,
        'density': 'Sparse',
        'role': 'Ambient',
        'movement': 'Static',
        'description': 'Minimal, atmospheric opening'
    },
    "Supportive Verse": {
        'energy': 0.4,
        'density': 'Medium',
        'role': 'Support',
        'movement': 'Flowing',
        'description': 'Behind vocals, smooth transitions'
    },
    "Building Pre-Chorus": {
        'energy': 0.6,
        'density': 'Medium',
        'role': 'Lead',
        'movement': 'Flowing',
        'description': 'Increasing tension toward chorus'
    },
    "Explosive Chorus": {
        'energy': 0.9,
        'density': 'Thick',
        'role': 'Lead',
        'movement': 'Agitated',
        'description': 'Maximum impact and fullness'
    },
    "Breakdown Bridge": {
        'energy': 0.3,
        'density': 'Sparse',
        'role': 'Ambient',
        'movement': 'Static',
        'description': 'Stripped-down, suspended feel'
    },
    "Resolved Outro": {
        'energy': 0.2,
        'density': 'Sparse',
        'role': 'Ambient',
        'movement': 'Static',
        'description': 'Peaceful, conclusive ending'
    }
}
# Default preset (a key of EMOTIONAL_PRESETS) for each section type.
# Used by SectionNamer.get_default_preset; 'Supportive Verse' is the fallback.
SECTION_TYPE_DEFAULTS = {
    'Intro': 'Sparse Intro',
    'Verse': 'Supportive Verse',
    'Pre-Chorus': 'Building Pre-Chorus',
    'Chorus': 'Explosive Chorus',
    'Post-Chorus': 'Explosive Chorus',
    'Bridge': 'Breakdown Bridge',
    'Breakdown': 'Breakdown Bridge',
    'Drop': 'Explosive Chorus',
    'Outro': 'Resolved Outro',
    'Custom': 'Supportive Verse'
}
class SectionNamer:
    """Generates display names for sections, numbering repeatable types."""

    @staticmethod
    def generate_name(section_type: str, existing_sections: List[dict]) -> str:
        """Return a display name for a new section of *section_type*.

        Args:
            section_type: One of SECTION_TYPES.
            existing_sections: Dicts with a 'section_type' key for sections
                already in the song; used to compute the next number.
        """
        # Custom sections get a fixed placeholder name.
        if section_type == 'Custom':
            return 'Custom Section'
        # One-off sections (Intro, Bridge, ...) keep their bare type name.
        if section_type in SINGLE_SECTIONS:
            return section_type
        # Repeatable sections are numbered after the existing ones of the
        # same type: first Verse is "Verse 1", next is "Verse 2", etc.
        if section_type in NUMBERED_SECTIONS:
            same_type = [
                s for s in existing_sections
                if s.get('section_type') == section_type
            ]
            return f"{section_type} {len(same_type) + 1}"
        # Unknown types pass through unchanged.
        return section_type

    @staticmethod
    def get_default_preset(section_type: str) -> str:
        """Return the default EMOTIONAL_PRESETS key for *section_type*."""
        return SECTION_TYPE_DEFAULTS.get(section_type, 'Supportive Verse')
class EmotionalAdapter:
    """Reshapes bass/chord voicings to match a section's emotional context."""

    @staticmethod
    def adapt_solo_piano(lh: List[int], rh: List[int], emotional_ctx: EmotionalContext) -> tuple:
        """
        Adapt a voicing for solo piano performance.

        Args:
            lh: Left hand MIDI notes
            rh: Right hand MIDI notes
            emotional_ctx: Emotional parameters

        Returns:
            Tuple of (adapted_lh, adapted_rh), each sorted with duplicates removed.
        """
        left = list(lh) if lh else []
        right = list(rh) if rh else []

        # ─── Energy: thin out quiet passages, double octaves on loud ones ───
        energy = emotional_ctx.energy
        if energy < 0.3:
            # Low energy: quieter, thinner texture.
            right = right[:3]
            left = left[:1]
        elif energy > 0.7:
            # High energy: fuller, with octave doublings.
            if left and len(left) < 3:
                left.append(left[0] + 12)    # octave above the bass note
            if right and len(right) < 6:
                right.append(right[0] + 12)  # octave doubling in the right hand

        # ─── Density: trim to essentials or stack extra octaves ───
        density = emotional_ctx.density
        if density == 'Sparse':
            left = left[:1]
            right = right if len(right) <= 2 else right[:2]
        elif density == 'Thick':
            if left and len(left) < 2:
                left.append(left[0] + 12)
            if right and len(right) < 5:
                right.append(min(right) + 12)
                # Room for one more after the low doubling: add a high octave.
                if len(right) < 6:
                    right.append(max(right) + 12)

        # Sort and drop duplicates produced by octave stacking.
        left = sorted(set(left))
        right = sorted(set(right))
        return left, right

    @staticmethod
    def adapt_arrangement(lh: List[int], rh: List[int], emotional_ctx: EmotionalContext) -> tuple:
        """
        Adapt a voicing for full band arrangement.

        Args:
            lh: Left hand (Bass Part) MIDI notes
            rh: Right hand (Chord Part) MIDI notes
            emotional_ctx: Emotional parameters

        Returns:
            Tuple of (adapted_lh, adapted_rh), each sorted with duplicates removed.
        """
        bass = list(lh) if lh else []
        chords = list(rh) if rh else []

        # ─── Energy ───
        if emotional_ctx.energy < 0.3:
            # Low energy: minimal arrangement.
            bass = bass[:1]
            chords = chords if len(chords) <= 2 else chords[:2]
        elif emotional_ctx.energy > 0.7:
            # High energy: spread bass, double the two lowest chord tones.
            if bass:
                root = bass[0]
                if len(bass) < 3:
                    bass = [root, root + 12, root + 19]  # root, octave, 12th
            if chords and len(chords) < 6:
                ordered = sorted(chords)
                chords.append(ordered[0] + 12)
                if len(ordered) > 1:
                    chords.append(ordered[1] + 12)

        # ─── Role ───
        role = emotional_ctx.role
        if role == 'Support':
            # Lift chord tones out of the vocal range (D4 to B4 = MIDI 62-71).
            vocal_zone = range(62, 72)
            chords = [note + 12 if note in vocal_zone else note for note in chords]
        elif role == 'Ambient':
            if emotional_ctx.energy < 0.5:
                # Quiet ambient: two-octave shimmer, single bass note.
                if chords:
                    chords = [note + 24 for note in chords[:2]]
                bass = bass[:1]
            else:
                # Louder ambient: deep, capped at three chord tones.
                if len(chords) > 3:
                    chords = chords[:3]

        # ─── Density ───
        if emotional_ctx.density == 'Sparse':
            bass = bass[:1]
            chords = chords if len(chords) <= 2 else chords[:2]
        elif emotional_ctx.density == 'Thick':
            if bass and len(bass) < 3:
                bass.append(bass[0] + 12)
            if chords and len(chords) < 6:
                # Capture extremes BEFORE appending so the high doubling is
                # based on the original chord, not the freshly added low octave.
                low, high = min(chords), max(chords)
                chords.append(low + 12)
                chords.append(high + 12)

        # Sort and drop duplicates produced by octave stacking.
        bass = sorted(set(bass))
        chords = sorted(set(chords))
        return bass, chords

    @staticmethod
    def get_voice_leading_settings(movement: str) -> dict:
        """Return voice-leading settings for a movement style.

        Unrecognized styles fall back to the 'Flowing' settings.
        """
        if movement == 'Static':
            return {'enabled': False, 'max_movement': 24}
        if movement == 'Agitated':
            return {'enabled': True, 'max_movement': 12}
        # 'Flowing' and any unknown style share the same settings.
        return {'enabled': True, 'max_movement': 5}
def get_section_types():
    """Return a fresh copy of the available section type names."""
    return list(SECTION_TYPES)