Spaces: Running on A10G
Commit · 5c6cdde
1 Parent(s): 8212fa0
Fix model loading with subfolder parameter
Files changed:
- audio_processing/__init__.py  +4  -0
- audio_processing/effect_chain.py  +131  -225
- main.py  +22  -7
- models/__init__.py  +4  -0
- models/ai_effector.py  +254  -328
- requirements.txt  +3  -0
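The whole fix reduces to one loading pattern: a LoRA adapter stored under checkpoints/ inside a Hub repo must be addressed by repo_id plus subfolder, because "heybaeheef/KU_SW_Academy/checkpoints" is not a valid repo_id on its own. A minimal sketch of the pattern this commit adopts (dtype and device options omitted; see models/ai_effector.py below for the full call):

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Base weights load as usual from the Hub.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-8B", trust_remote_code=True)

# The adapter lives in the checkpoints/ subfolder of the repo, so the
# repo id and the folder inside it are passed as separate arguments.
model = PeftModel.from_pretrained(
    base,
    "heybaeheef/KU_SW_Academy",
    subfolder="checkpoints",
)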
audio_processing/__init__.py
CHANGED
@@ -0,0 +1,4 @@
+# audio_processing package
+from .effect_chain import EffectChain
+
+__all__ = ["EffectChain"]
audio_processing/effect_chain.py
CHANGED
@@ -1,255 +1,161 @@
Removed (previous implementation, condensed; most deleted lines survive only as fragments in this diff view):
- Module docstring: "A processing chain that applies effects to real audio. Uses the pedalboard library (Spotify's audio plugin library): high-quality VST-grade effects, easy to use from Python, real-time processing also possible."
- Imports the new version drops: Path, Dict/Any typing, HighpassFilter and LowpassFilter from pedalboard, and pedalboard.io.AudioFile.
- A stateless EffectChain (its __init__ was just `pass`) with an available-effects list ending in "limiter", and a process(input_path, output_path, parameters) that read with sf.read(), duplicated mono to stereo via np.column_stack (some effects required stereo), cast to float32, built a board with _build_pedalboard(parameters, sample_rate), blended dry and processed signals by final_wet_mix, clipped to [-1.0, 1.0], and wrote with sf.write().
- A _build_pedalboard(params, sample_rate) with hardcoded sections: a makeup Gain whenever compressor_makeup != 0; a Distortion stage mapping distortion_amount from 0-1 to 0-40 dB of drive, followed by a tone LowpassFilter swept from 2 kHz to 12 kHz by distortion_tone; a Reverb(room_size, damping, wet_level=reverb_wet, dry_level=1 - reverb_wet, width=1.0) when reverb_wet_dry > 0; and a closing Limiter(threshold_db=-1.0, release_ms=100) for clip protection.
- A chunk-oriented method taking (audio_chunk, sample_rate, parameters) for streaming use; its body is not recoverable from this view.
New version (161 lines, reconstructed from the added and unchanged lines of the diff):

"""
Effect Chain - Pedalboard-based audio effect processing
=======================================================
"""

import numpy as np
import soundfile as sf
from typing import Dict, List, Optional
from pedalboard import (
    Pedalboard,
    Compressor,
    Gain,
    HighShelfFilter,
    LowShelfFilter,
    PeakFilter,
    Delay,
    Reverb,
    Distortion,
    Limiter
)


class EffectChain:
    """Pedalboard-based effect chain"""

    def __init__(self, sample_rate: int = 44100):
        self.sample_rate = sample_rate

        self.available_effects = [
            "eq_peak1", "eq_peak2",
            "eq_lowshelf", "eq_highshelf",
            "distortion", "delay", "compressor",
            "reverb", "limiter"
        ]

    def get_available_effects(self) -> List[str]:
        """List of available effects"""
        return self.available_effects

    def _build_pedalboard(self, params: Dict[str, float]) -> Pedalboard:
        """Build a Pedalboard from the parameters"""
        effects = []

        # Compressor (always applied)
        effects.append(Compressor(
            threshold_db=-18.0,
            ratio=2.0,
            attack_ms=10.0,
            release_ms=100.0
        ))

        # EQ Peak 1
        freq1 = params.get("eq_peak1.params.freq", 1000.0)
        gain1 = params.get("eq_peak1.params.gain", 0.0)
        q1 = params.get("eq_peak1.params.q", 1.0)
        if abs(gain1) > 0.1:
            effects.append(PeakFilter(
                cutoff_frequency_hz=max(20, min(20000, freq1)),
                gain_db=max(-12, min(12, gain1)),
                q=max(0.1, min(10, q1))
            ))

        # EQ Peak 2
        freq2 = params.get("eq_peak2.params.freq", 4000.0)
        gain2 = params.get("eq_peak2.params.gain", 0.0)
        q2 = params.get("eq_peak2.params.q", 1.0)
        if abs(gain2) > 0.1:
            effects.append(PeakFilter(
                cutoff_frequency_hz=max(20, min(20000, freq2)),
                gain_db=max(-12, min(12, gain2)),
                q=max(0.1, min(10, q2))
            ))

        # Low Shelf
        freq_low = params.get("eq_lowshelf.params.freq", 200.0)
        gain_low = params.get("eq_lowshelf.params.gain", 0.0)
        if abs(gain_low) > 0.1:
            effects.append(LowShelfFilter(
                cutoff_frequency_hz=max(20, min(2000, freq_low)),
                gain_db=max(-12, min(12, gain_low)),
                q=0.707
            ))

        # High Shelf
        freq_high = params.get("eq_highshelf.params.freq", 8000.0)
        gain_high = params.get("eq_highshelf.params.gain", 0.0)
        if abs(gain_high) > 0.1:
            effects.append(HighShelfFilter(
                cutoff_frequency_hz=max(1000, min(20000, freq_high)),
                gain_db=max(-12, min(12, gain_high)),
                q=0.707
            ))

        # Distortion
        dist_amount = params.get("distortion_amount", 0.0)
        if dist_amount > 0.01:
            effects.append(Distortion(
                drive_db=max(0, min(20, dist_amount * 100))
            ))

        # Delay
        delay_time = params.get("delay.delay_time", 0.02)
        delay_feedback = params.get("delay.feedback", 0.3)
        delay_mix = params.get("delay.mix", 0.2)
        if delay_mix > 0.01:
            effects.append(Delay(
                delay_seconds=max(0.01, min(1.0, delay_time)),
                feedback=max(0.0, min(0.9, delay_feedback)),
                mix=max(0.0, min(1.0, delay_mix))
            ))

        # Limiter (always last)
        effects.append(Limiter(threshold_db=-1.0))

        return Pedalboard(effects)

    def process(
        self,
        input_path: str,
        output_path: str,
        parameters: Dict[str, float]
    ) -> bool:
        """Process an audio file"""
        try:
            # Load the audio
            audio, sr = sf.read(input_path)

            # Mono/stereo handling
            if len(audio.shape) == 1:
                audio = audio.reshape(-1, 1)

            # Convert to float32
            audio = audio.astype(np.float32)

            # Build the pedalboard
            board = self._build_pedalboard(parameters)

            # Process
            processed = board(audio, sr)

            # Wet/dry mix
            wet_mix = parameters.get("final_wet_mix", 0.5)
            wet_mix = max(0.0, min(1.0, wet_mix))

            # Match lengths
            min_len = min(len(audio), len(processed))
            output = audio[:min_len] * (1 - wet_mix) + processed[:min_len] * wet_mix

            # Prevent clipping
            output = np.clip(output, -1.0, 1.0)

            # Save
            sf.write(output_path, output, sr)

            print(f"[EffectChain] ✅ Processing complete: {output_path}")
            return True

        except Exception as e:
            print(f"[EffectChain] ❌ Processing failed: {e}")
            raise e
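For orientation, a minimal usage sketch of the rewritten class; the file names are placeholders, and the parameter keys are the dotted names _build_pedalboard() reads:

from audio_processing.effect_chain import EffectChain

chain = EffectChain(sample_rate=44100)
chain.process(
    "vocal_in.wav",   # placeholder input path
    "vocal_out.wav",  # placeholder output path
    {
        "eq_highshelf.params.gain": 3.0,  # lift the top end (clamped to +/-12 dB)
        "delay.mix": 0.25,                # > 0.01, so the Delay stage is added
        "final_wet_mix": 0.6,             # 60% processed / 40% dry blend
    },
)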
main.py
CHANGED
@@ -11,6 +11,14 @@ import tempfile
 import os
 import uuid
 import base64
+import logging
+from datetime import datetime
+
+# Logging setup
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+print(f"\n===== Application Startup at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} =====\n")
 
 # Internal modules
 from models.ai_effector import AIEffector
@@ -20,13 +28,14 @@ from audio_processing.effect_chain import EffectChain
 # Settings
 # ============================================
 
-# Trained model path - …
-…
+# Trained model path - repo_id and subfolder kept separate!
+MODEL_REPO_ID = os.environ.get("DIFFVOX_MODEL_REPO", "heybaeheef/KU_SW_Academy")
+MODEL_SUBFOLDER = os.environ.get("DIFFVOX_MODEL_SUBFOLDER", "checkpoints")
 BASE_MODEL_NAME = os.environ.get("BASE_MODEL_NAME", "Qwen/Qwen3-8B")
 AUDIO_FEATURE_DIM = int(os.environ.get("AUDIO_FEATURE_DIM", "64"))
 USE_HUGGINGFACE = os.environ.get("USE_HUGGINGFACE", "true").lower() == "true"
 
 # Temp file storage path
 TEMP_DIR = Path(tempfile.gettempdir()) / "magicpath"
 TEMP_DIR.mkdir(exist_ok=True)
@@ -53,14 +62,16 @@ app.add_middleware(
 print("=" * 60)
 print("MagicPath AI Vocal Effects Server v2.0")
 print("=" * 60)
-print(f"Model …
+print(f"Model Repo: {MODEL_REPO_ID}")
+print(f"Model Subfolder: {MODEL_SUBFOLDER}")
 print(f"Base Model: {BASE_MODEL_NAME}")
 print(f"Audio Feature Dim: {AUDIO_FEATURE_DIM}")
 print(f"Use Hugging Face: {USE_HUGGINGFACE}")
 print("=" * 60)
 
 ai_effector = AIEffector(
-    …
+    model_repo_id=MODEL_REPO_ID,
+    model_subfolder=MODEL_SUBFOLDER,
     base_model_name=BASE_MODEL_NAME,
     audio_feature_dim=AUDIO_FEATURE_DIM,
     use_huggingface=USE_HUGGINGFACE
@@ -79,7 +90,8 @@ async def root():
         "status": "running",
         "message": "MagicPath AI Vocal Effects Server v2.0 (DiffVox LLM)",
         "ai_model_loaded": ai_effector.is_loaded(),
-        "…
+        "model_repo": MODEL_REPO_ID,
+        "model_subfolder": MODEL_SUBFOLDER,
         "endpoints": {
             "POST /process": "Process an audio file and return it",
             "POST /predict": "Predict parameters only (JSON)",
@@ -96,7 +108,7 @@ async def health_check():
         "status": "healthy",
         "ai_model_loaded": ai_effector.is_loaded(),
         "supported_effects": effect_chain.get_available_effects(),
-        "…
+        "model_repo": MODEL_REPO_ID,
         "base_model": BASE_MODEL_NAME
     }
@@ -128,6 +140,7 @@ async def predict_parameters(
         })
 
     except Exception as e:
+        logger.error(f"Predict error: {e}")
         raise HTTPException(status_code=500, detail=str(e))
@@ -170,6 +183,7 @@ async def process_audio(
         )
 
     except Exception as e:
+        logger.error(f"Process error: {e}")
         if input_path and Path(input_path).exists():
             os.remove(input_path)
         if output_path and Path(output_path).exists():
@@ -223,6 +237,7 @@ async def process_audio_with_params(
         })
 
     except Exception as e:
+        logger.error(f"Process with params error: {e}")
        if input_path and Path(input_path).exists():
             os.remove(input_path)
         if output_path and Path(output_path).exists():
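Both new fields surface through the API, so a broken adapter load is visible without reading logs. A smoke-test sketch — the port is an assumption (7860 is the Hugging Face Spaces default; this diff never shows the launch command):

import requests

# Hypothetical local check; only the response keys are confirmed by the diff.
health = requests.get("http://localhost:7860/health").json()
print(health["model_repo"])        # "heybaeheef/KU_SW_Academy" unless overridden
print(health["ai_model_loaded"])   # False means the preset fallback is active
print(health["supported_effects"]) # from EffectChain.get_available_effects()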
models/__init__.py
CHANGED
@@ -0,0 +1,4 @@
+# models package
+from .ai_effector import AIEffector
+
+__all__ = ["AIEffector"]
models/ai_effector.py
CHANGED
@@ -1,269 +1,169 @@ and @@ -276,129 +176,155 @@
Removed (previous implementation, condensed; many deleted lines survive only as fragments in this diff view):
- Module docstring: "AI Effector — predicts effector parameters from audio using a CLAP encoder plus a trained LLM, with automatic DiffVox LLM parameter → MagicPath web parameter conversion."
- Guarded imports: transformers/peft behind a TRANSFORMERS_AVAILABLE flag and models.audio_encoder.AudioEncoder behind AUDIO_ENCODER_AVAILABLE, each printing "not installed - running in preset mode" on ImportError.
- A parameter-mapper class holding a DIFFVOX_TO_WEB key map and VALUE_TRANSFORMS lambdas that denormalized model outputs: EQ gains scaled by 12 (x * 12 dB); EQ frequencies mapped log-scale from [-1, 1] to 20 Hz-20 kHz via 20 * (20000/20) ** ((x + 1) / 2); Q via 0.1 * (10/0.1) ** ((x + 1) / 2); delay time to 0-1000 ms; and delay feedback/mix, distortion amount, and final wet mix to 0-1. Its diffvox_to_web() classmethod skipped unmapped keys and fell back to the raw value if a transform raised.
- A parser class that extracted the first valid JSON block from raw LLM output using the regexes r'\{[^{}]*\}' and r'\{(?:[^{}]|\{[^{}]*\})*\}'.
- The old AIEffector itself: flat parameter keys (eq_lowshelf_gain, eq_peak1_freq, compressor_threshold, reverb_room_size, ...) in DEFAULT_PARAMS; "warm" and "bright" PRESETS; a model_path-based _load_model() that tested os.path.exists(model_path) and loaded the LoRA adapter from a single combined path; and a predict() that dispatched to _predict_with_model() (prompt "Task: Convert text to audio parameters.\nAudio: {audio_state_str}\nText: {text_prompt}\nParameters:", greedy generation at temperature=0.1, JSON parse, DiffVox-to-web conversion merged over the defaults) or to the fallback _predict_with_preset(), which matched preset names as substrings of the lowercased prompt and returned DEFAULT_PARAMS otherwise.
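Of the deleted mapper, the log-scale frequency denormalization is the piece worth recording, since any future reintroduction of DiffVox-normalized outputs will need it. A standalone sketch (the function name is mine; the formula is lifted verbatim from the removed code):

# x in [-1, 1] maps log-uniformly onto [lo, hi]; the removed code used lo=20, hi=20000.
def denorm_freq(x: float, lo: float = 20.0, hi: float = 20000.0) -> float:
    return lo * (hi / lo) ** ((x + 1) / 2)

assert denorm_freq(-1.0) == 20.0       # lower bound
assert denorm_freq(1.0) == 20000.0     # upper bound
print(denorm_freq(0.0))                # ~632.5 Hz, the geometric mean of 20 and 20000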
New version (new lines 1-330, reconstructed; new lines 170-175 — the AutoTokenizer.from_pretrained arguments — are unchanged context the diff view elides):

"""
AI Effector - DiffVox LLM-based effect parameter prediction
===========================================================
"""

import os
import json
import re
import torch
import numpy as np
from typing import Dict, List, Optional, Any
from pathlib import Path
import warnings

warnings.filterwarnings("ignore")

# Default parameters (used when model loading fails)
DEFAULT_PARAMETERS = {
    "eq_peak1.params.freq": 1000.0,
    "eq_peak1.params.gain": 0.0,
    "eq_peak1.params.q": 1.0,
    "eq_peak2.params.freq": 4000.0,
    "eq_peak2.params.gain": 0.0,
    "eq_peak2.params.q": 1.0,
    "eq_lowshelf.params.freq": 200.0,
    "eq_lowshelf.params.gain": 0.0,
    "eq_lowshelf.params.q": 0.707,
    "eq_highshelf.params.freq": 8000.0,
    "eq_highshelf.params.gain": 0.0,
    "eq_highshelf.params.q": 0.707,
    "distortion_amount": 0.0,
    "delay.delay_time": 0.02,
    "delay.feedback": 0.3,
    "delay.mix": 0.2,
    "final_wet_mix": 0.5
}

# Style presets (work without the AI model)
STYLE_PRESETS = {
    "warm": {
        "eq_lowshelf.params.gain": 3.0,
        "eq_highshelf.params.gain": -1.0,
        "distortion_amount": 0.05,
    },
    "bright": {
        "eq_highshelf.params.gain": 4.0,
        "eq_peak2.params.gain": 2.0,
        "eq_lowshelf.params.gain": -1.0,
    },
    "vintage": {
        "eq_lowshelf.params.gain": 2.0,
        "eq_highshelf.params.gain": -2.0,
        "distortion_amount": 0.1,
        "delay.mix": 0.15,
    },
    "modern": {
        "eq_peak1.params.gain": 2.0,
        "eq_peak2.params.gain": 3.0,
        "eq_highshelf.params.gain": 2.0,
    },
    "spacious": {
        "delay.delay_time": 0.05,
        "delay.feedback": 0.4,
        "delay.mix": 0.35,
    },
    "dry": {
        "final_wet_mix": 0.2,
        "delay.mix": 0.0,
    },
    "saturated": {
        "distortion_amount": 0.15,
        "eq_lowshelf.params.gain": 1.0,
    }
}


class AudioEncoder:
    """Simplified audio encoder (CLAP substitute)"""

    def __init__(self, output_dim: int = 64):
        self.output_dim = output_dim
        self.sr = 44100

    def get_audio_features(self, audio_path: str) -> List[float]:
        """Extract features from audio (simplified version)"""
        try:
            import librosa

            y, sr = librosa.load(audio_path, sr=self.sr, duration=5.0)

            # Basic feature extraction
            features = []

            # MFCC (20 values)
            mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20)
            features.extend(np.mean(mfcc, axis=1).tolist())

            # Spectral features
            spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
            spectral_bandwidth = np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr))
            spectral_rolloff = np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr))

            features.extend([spectral_centroid / 10000, spectral_bandwidth / 10000, spectral_rolloff / 10000])

            # RMS energy
            rms = np.mean(librosa.feature.rms(y=y))
            features.append(float(rms))

            # Zero crossing rate
            zcr = np.mean(librosa.feature.zero_crossing_rate(y))
            features.append(float(zcr))

            # Chroma (12 values)
            chroma = librosa.feature.chroma_stft(y=y, sr=sr)
            features.extend(np.mean(chroma, axis=1).tolist())

            # Pad or truncate to output_dim
            if len(features) < self.output_dim:
                features.extend([0.0] * (self.output_dim - len(features)))
            else:
                features = features[:self.output_dim]

            return features

        except Exception as e:
            print(f"[AudioEncoder] Feature extraction failed: {e}")
            return [0.0] * self.output_dim


class AIEffector:
    """AI-based effector parameter prediction"""

    def __init__(
        self,
        model_repo_id: str = "heybaeheef/KU_SW_Academy",
        model_subfolder: str = "checkpoints",
        base_model_name: str = "Qwen/Qwen3-8B",
        audio_feature_dim: int = 64,
        use_huggingface: bool = True
    ):
        self.model_repo_id = model_repo_id
        self.model_subfolder = model_subfolder
        self.base_model_name = base_model_name
        self.audio_feature_dim = audio_feature_dim
        self.use_huggingface = use_huggingface

        self.model = None
        self.tokenizer = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Audio encoder
        self.audio_encoder = AudioEncoder(output_dim=audio_feature_dim)

        # Attempt to load the model
        self._load_model()

    def _load_model(self):
        """Load the model"""
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer
            from peft import PeftModel

            print(f"[AIEffector] Starting model load...")
            print(f"  - Base Model: {self.base_model_name}")
            print(f"  - Adapter Repo: {self.model_repo_id}")
            print(f"  - Adapter Subfolder: {self.model_subfolder}")

            # Load the tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                ...  # unchanged arguments (new lines 170-175), elided by the diff view
            )

            # Load the base model
            base_model = AutoModelForCausalLM.from_pretrained(
                self.base_model_name,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )

            # Load the LoRA adapter (uses the subfolder parameter!)
            if self.use_huggingface:
                print(f"[AIEffector] Loading LoRA adapter from Hugging Face...")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    self.model_repo_id,
                    subfolder=self.model_subfolder,  # the key fix!
                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                )
            else:
                # Use a local path
                local_path = os.path.join(self.model_repo_id, self.model_subfolder)
                print(f"[AIEffector] Loading LoRA adapter locally: {local_path}")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    local_path,
                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                )

            self.model.eval()
            print(f"[AIEffector] ✅ Model loaded successfully!")

        except Exception as e:
            print(f"[AIEffector] ❌ Model load failed: {e}")
            print(f"[AIEffector] Switching to fallback mode (preset-based)")
            self.model = None
            self.tokenizer = None

    def is_loaded(self) -> bool:
        """Whether the model is loaded"""
        return self.model is not None

    def _apply_preset(self, prompt: str) -> Dict[str, float]:
        """Match style presets against the prompt"""
        params = DEFAULT_PARAMETERS.copy()
        prompt_lower = prompt.lower()

        for style_name, style_params in STYLE_PRESETS.items():
            if style_name in prompt_lower:
                params.update(style_params)

        return params

    def _format_prompt(self, text_prompt: str, audio_features: List[float]) -> str:
        """Format the LLM input prompt"""
        # Express the audio features compactly
        audio_summary = ", ".join([f"{v:.3f}" for v in audio_features[:8]])

        prompt = f"""You are an audio effect parameter predictor.

Input:
- Text description: {text_prompt}
- Audio features (first 8): [{audio_summary}]

Output the effect parameters as JSON:
```json
{{
    "eq_peak1.params.freq": <float>,
    "eq_peak1.params.gain": <float>,
    "eq_peak1.params.q": <float>,
    "eq_peak2.params.freq": <float>,
    "eq_peak2.params.gain": <float>,
    "eq_peak2.params.q": <float>,
    "eq_lowshelf.params.freq": <float>,
    "eq_lowshelf.params.gain": <float>,
    "eq_lowshelf.params.q": <float>,
    "eq_highshelf.params.freq": <float>,
    "eq_highshelf.params.gain": <float>,
    "eq_highshelf.params.q": <float>,
    "distortion_amount": <float>,
    "delay.delay_time": <float>,
    "delay.feedback": <float>,
    "delay.mix": <float>,
    "final_wet_mix": <float>
}}
```

JSON output:"""

        return prompt

    def _parse_output(self, output_text: str) -> Dict[str, float]:
        """Extract parameters from the LLM output"""
        try:
            # Find a JSON block
            json_match = re.search(r'\{[^{}]*\}', output_text, re.DOTALL)
            if json_match:
                params = json.loads(json_match.group())

                # Validate and merge onto the defaults
                result = DEFAULT_PARAMETERS.copy()
                for key, value in params.items():
                    if key in result and isinstance(value, (int, float)):
                        result[key] = float(value)

                return result
        except Exception as e:
            print(f"[AIEffector] Output parsing failed: {e}")

        return DEFAULT_PARAMETERS.copy()

    def predict(self, audio_path: str, text_prompt: str = "") -> Dict[str, float]:
        """Predict parameters"""

        # Without a model, fall back to presets
        if not self.is_loaded():
            print(f"[AIEffector] Using preset mode (prompt: {text_prompt})")
            return self._apply_preset(text_prompt)

        try:
            # Extract audio features
            audio_features = self.audio_encoder.get_audio_features(audio_path)

            # Build the prompt
            prompt = self._format_prompt(text_prompt, audio_features)

            # Tokenize
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                truncation=True,
                max_length=1024
            ).to(self.device)

            # Generate (note: temperature is ignored when do_sample=False)
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=256,
                    do_sample=False,
                    temperature=0.1,
                    pad_token_id=self.tokenizer.pad_token_id
                )

            # Decode
            output_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Parse
            params = self._parse_output(output_text)

            print(f"[AIEffector] ✅ AI prediction complete")
            return params

        except Exception as e:
            print(f"[AIEffector] Prediction failed: {e}, falling back to presets")
            return self._apply_preset(text_prompt)
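End to end, the two rewritten modules compose into a two-call pipeline. A hypothetical sketch — "vocal.wav" and "vocal_fx.wav" are placeholder paths, and because the constructor falls back to STYLE_PRESETS when the adapter cannot load, this runs even without GPU weights:

from models.ai_effector import AIEffector
from audio_processing.effect_chain import EffectChain

effector = AIEffector()  # tries Qwen3-8B + LoRA, else preset fallback
params = effector.predict("vocal.wav", text_prompt="warm vintage vocal")
EffectChain().process("vocal.wav", "vocal_fx.wav", params)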
requirements.txt
CHANGED
@@ -18,3 +18,6 @@ transformers>=4.36.0
 peft>=0.7.0
 huggingface_hub>=0.20.0
 accelerate>=0.25.0
+
+# Additional dependencies
+scipy>=1.10.0