KU_SW_Academy / models /ai_effector.py
heybaeheef's picture
Upload ai_effector.py
1242aec verified
raw
history blame
20.9 kB
"""
AI Effector - DiffVox LLM ๊ธฐ๋ฐ˜ ์ดํŽ™ํŠธ ํŒŒ๋ผ๋ฏธํ„ฐ ์˜ˆ์ธก
===================================================
V2: ํ•™์Šต๊ณผ ๋™์ผํ•œ CLAP ์ธ์ฝ”๋” + ํ”„๋กฌํ”„ํŠธ ํ˜•์‹ ์‚ฌ์šฉ
"""
import os
import json
import re
import torch
import numpy as np
from typing import Dict, List, Optional, Any
from pathlib import Path
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
# ๊ธฐ๋ณธ ํŒŒ๋ผ๋ฏธํ„ฐ (๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ ์‹œ ์‚ฌ์šฉ)
DEFAULT_PARAMETERS = {
"eq_peak1.params.freq": 1000.0,
"eq_peak1.params.gain": 0.0,
"eq_peak1.params.Q": 1.0, # ๋Œ€๋ฌธ์ž Q (ํ•™์Šต ๋ฐ์ดํ„ฐ์™€ ์ผ์น˜)
"eq_peak2.params.freq": 4000.0,
"eq_peak2.params.gain": 0.0,
"eq_peak2.params.Q": 1.0,
"eq_lowshelf.params.freq": 200.0,
"eq_lowshelf.params.gain": 0.0,
"eq_highshelf.params.freq": 8000.0,
"eq_highshelf.params.gain": 0.0,
"distortion_amount": 0.0,
"delay.delay_time": 0.02,
"delay.feedback": 0.3,
"delay.mix": 0.2,
"final_wet_mix": 0.5
}
# ์Šคํƒ€์ผ ํ”„๋ฆฌ์…‹ (AI ์—†์ด๋„ ์ž‘๋™)
STYLE_PRESETS = {
"warm": {
"eq_lowshelf.params.gain": 3.0,
"eq_highshelf.params.gain": -1.0,
"distortion_amount": 0.05,
},
"bright": {
"eq_highshelf.params.gain": 4.0,
"eq_peak2.params.gain": 2.0,
"eq_lowshelf.params.gain": -1.0,
},
"vintage": {
"eq_lowshelf.params.gain": 2.0,
"eq_highshelf.params.gain": -2.0,
"distortion_amount": 0.1,
"delay.mix": 0.15,
},
"modern": {
"eq_peak1.params.gain": 2.0,
"eq_peak2.params.gain": 3.0,
"eq_highshelf.params.gain": 2.0,
},
"spacious": {
"delay.delay_time": 0.05,
"delay.feedback": 0.4,
"delay.mix": 0.35,
},
"dry": {
"final_wet_mix": 0.2,
"delay.mix": 0.0,
},
"saturated": {
"distortion_amount": 0.15,
"eq_lowshelf.params.gain": 1.0,
}
}
class CLAPAudioEncoder:
    """
    CLAP-based audio encoder (identical setup to the one used at training
    time). Uses the laion/larger_clap_music model; the 512-dim CLAP audio
    embedding is reduced to ``output_dim`` (default 64) by block mean-pooling.
    """

    def __init__(self, output_dim: int = 64, model_name: str = "laion/larger_clap_music"):
        """
        Args:
            output_dim: dimensionality of the returned feature vector.
            model_name: Hugging Face id of the CLAP checkpoint to load.
        """
        self.output_dim = output_dim
        self.model_name = model_name
        self.target_sr = 48000  # CLAP expects 48 kHz input
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None
        self.processor = None
        self._load_model()

    def _load_model(self):
        """Load the CLAP model/processor; on failure leave ``self.model`` None."""
        try:
            from transformers import ClapModel, ClapProcessor
            print(f"[CLAPEncoder] CLAP ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘: {self.model_name}")
            self.processor = ClapProcessor.from_pretrained(self.model_name)
            self.model = ClapModel.from_pretrained(self.model_name)
            self.model = self.model.to(self.device)
            self.model.eval()
            print(f"[CLAPEncoder] โœ… CLAP ๋ชจ๋ธ ๋กœ๋“œ ์™„๋ฃŒ (512โ†’{self.output_dim} pooling)")
        except ImportError:
            print("[CLAPEncoder] โŒ transformers ๋ฏธ์„ค์น˜")
            print(" pip install transformers")
        except Exception as e:
            print(f"[CLAPEncoder] โŒ ๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ: {e}")
            import traceback
            traceback.print_exc()

    def get_audio_features(self, audio_path: str) -> List[float]:
        """
        Extract an ``output_dim``-dimensional feature vector from an audio
        file (same pipeline as training). Returns an all-zero vector when the
        model is not loaded or extraction fails.
        """
        if self.model is None:
            print("[CLAPEncoder] ๋ชจ๋ธ์ด ๋กœ๋“œ๋˜์ง€ ์•Š์Œ, ๋นˆ ํŠน์ง• ๋ฐ˜ํ™˜")
            return [0.0] * self.output_dim
        try:
            import librosa
            # 1. Load audio (resampled to 48 kHz as CLAP requires)
            audio, sr = librosa.load(audio_path, sr=self.target_sr, mono=True)
            # 2. Prepare CLAP inputs
            inputs = self.processor(
                audios=audio,
                sampling_rate=self.target_sr,
                return_tensors="pt",
                padding=True
            ).to(self.device)
            # 3. Feature extraction (no grad needed at inference)
            with torch.no_grad():
                outputs = self.model.get_audio_features(**inputs)
            # outputs is a [1, 512] tensor
            features_512 = outputs[0].cpu().numpy()
            # 4. 512 -> output_dim reduction (mean pooling, same as training)
            features_64 = self._reduce_dimension(features_512)
            return features_64.tolist()
        except Exception as e:
            print(f"[CLAPEncoder] ํŠน์ง• ์ถ”์ถœ ์‹คํŒจ: {e}")
            import traceback
            traceback.print_exc()
            return [0.0] * self.output_dim

    def _reduce_dimension(self, features: np.ndarray) -> np.ndarray:
        """Mean-pool a 1-D feature vector down to ``output_dim`` entries.

        The input is split into ``output_dim`` contiguous groups (earlier
        groups receive one extra element when the length is not evenly
        divisible) and each group is averaged — the same scheme used at
        training time (512 / 64 = 8 elements per group).

        Raises:
            ValueError: if ``features`` is shorter than ``output_dim``;
                mean-pooling cannot upsample. (Previously this case silently
                produced NaNs from averaging empty slices.)
        """
        current_dim = len(features)
        if current_dim == self.output_dim:
            return features
        if current_dim < self.output_dim:
            raise ValueError(
                f"Cannot pool {current_dim} features down to {self.output_dim}"
            )
        pool_size = current_dim // self.output_dim
        remainder = current_dim % self.output_dim
        if remainder == 0:
            # Fast path for the common, evenly-divisible case: one reshape +
            # vectorized row means instead of a Python loop.
            return features.reshape(self.output_dim, pool_size).mean(axis=1)
        pooled = []
        idx = 0
        for i in range(self.output_dim):
            size = pool_size + (1 if i < remainder else 0)
            pooled.append(np.mean(features[idx:idx + size]))
            idx += size
        return np.array(pooled)

    def is_loaded(self) -> bool:
        """True when the CLAP model was loaded successfully."""
        return self.model is not None
class AIEffector:
    """AI-based effector parameter prediction (V2: same setup as training).

    Loads a Qwen base model plus a LoRA adapter and predicts audio-effect
    parameters from a CLAP audio embedding + text prompt. Falls back to
    keyword-matched STYLE_PRESETS whenever the LLM is unavailable or fails.
    """

    def __init__(
        self,
        model_repo_id: str = "heybaeheef/KU_SW_Academy",
        model_subfolder: str = "checkpoints",
        base_model_name: str = "Qwen/Qwen3-8B",
        audio_feature_dim: int = 64,
        use_huggingface: bool = True
    ):
        """
        Args:
            model_repo_id: HF repo id (or local root when use_huggingface=False).
            model_subfolder: subfolder containing the LoRA adapter.
            base_model_name: base causal-LM checkpoint.
            audio_feature_dim: dimensionality of the CLAP feature vector.
            use_huggingface: load the adapter from the Hub vs. local disk.
        """
        self.model_repo_id = model_repo_id
        self.model_subfolder = model_subfolder
        self.base_model_name = base_model_name
        self.audio_feature_dim = audio_feature_dim
        self.use_huggingface = use_huggingface
        self.model = None
        self.tokenizer = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Key fix: use the CLAP audio encoder (identical to training)
        print(f"[AIEffector] CLAP ์˜ค๋””์˜ค ์ธ์ฝ”๋” ์ดˆ๊ธฐํ™” ์ค‘...")
        self.audio_encoder = CLAPAudioEncoder(output_dim=audio_feature_dim)
        # Request counter (incremented per predict() call, for logging)
        self.request_count = 0
        # Attempt to load the model (sets self.model/tokenizer, or None on failure)
        self._load_model()

    def _load_model(self):
        """Load base model (4-bit on CUDA), tokenizer and LoRA adapter.

        On any failure, resets ``self.model``/``self.tokenizer`` to None so
        that ``predict`` transparently uses the preset fallback path.
        """
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
            from peft import PeftModel
            print(f"[AIEffector] ๋ฒ ์ด์Šค ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘: {self.base_model_name}")
            # 4-bit quantization config (GPU only; CPU falls back to fp32)
            if torch.cuda.is_available():
                bnb_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_compute_dtype=torch.float16,
                    bnb_4bit_use_double_quant=True
                )
                base_model = AutoModelForCausalLM.from_pretrained(
                    self.base_model_name,
                    quantization_config=bnb_config,
                    device_map="auto",
                    trust_remote_code=True
                )
            else:
                base_model = AutoModelForCausalLM.from_pretrained(
                    self.base_model_name,
                    torch_dtype=torch.float32,
                    device_map="auto",
                    trust_remote_code=True
                )
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.base_model_name,
                trust_remote_code=True
            )
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
            print(f"[AIEffector] LoRA ์–ด๋Œ‘ํ„ฐ ๋กœ๋”ฉ ์ค‘...")
            if self.use_huggingface:
                print(f"[AIEffector] HuggingFace์—์„œ LoRA ๋กœ๋”ฉ: {self.model_repo_id}/{self.model_subfolder}")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    self.model_repo_id,
                    subfolder=self.model_subfolder,
                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                )
            else:
                local_path = os.path.join(self.model_repo_id, self.model_subfolder)
                print(f"[AIEffector] ๋กœ์ปฌ์—์„œ LoRA ์–ด๋Œ‘ํ„ฐ ๋กœ๋”ฉ: {local_path}")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    local_path,
                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                )
            self.model.eval()
            print(f"[AIEffector] โœ… ๋ชจ๋ธ ๋กœ๋“œ ์„ฑ๊ณต!")
        except Exception as e:
            print(f"[AIEffector] โŒ ๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ: {e}")
            import traceback
            traceback.print_exc()
            print(f"[AIEffector] ํด๋ฐฑ ๋ชจ๋“œ๋กœ ์ „ํ™˜ (ํ”„๋ฆฌ์…‹ ๊ธฐ๋ฐ˜)")
            self.model = None
            self.tokenizer = None

    def is_loaded(self) -> bool:
        """True when the LLM (base + LoRA) is loaded and AI mode is active."""
        return self.model is not None

    def _apply_preset(self, prompt: str) -> Dict[str, float]:
        """Match preset names against the prompt (case-insensitive substring).

        Multiple presets may match; later matches overwrite overlapping keys
        in dict-iteration order of STYLE_PRESETS. Returns a full parameter
        dict based on DEFAULT_PARAMETERS.
        """
        params = DEFAULT_PARAMETERS.copy()
        prompt_lower = prompt.lower()
        matched_presets = []
        for style_name, style_params in STYLE_PRESETS.items():
            if style_name in prompt_lower:
                params.update(style_params)
                matched_presets.append(style_name)
        if matched_presets:
            print(f" [Preset] ๋งค์นญ๋œ ํ”„๋ฆฌ์…‹: {matched_presets}")
        return params

    def _format_prompt(self, text_prompt: str, audio_features: List[float]) -> str:
        """
        Core fix: build the prompt in exactly the same format used at
        training time (train_model.py lines 243-246).
        """
        audio_state_str = json.dumps(audio_features)
        # Exactly the same format as during training!
        prompt = f"""Task: Convert text to audio parameters.
Audio: {audio_state_str}
Text: {text_prompt}
Parameters:"""
        return prompt

    def _parse_output(self, output_text: str) -> Dict[str, float]:
        """Extract effect parameters from raw LLM output (robust version).

        Pipeline: strip <think> tags, unwrap markdown code fences, locate the
        first balanced JSON object, sanitize it, parse, then map recognized
        keys onto a DEFAULT_PARAMETERS copy. Falls back to defaults on any
        failure. Unknown keys and non-numeric values are silently ignored.
        """
        print(f" [Parse] Raw output ๊ธธ์ด: {len(output_text)} ๋ฌธ์ž")
        try:
            text = output_text
            # 1. Remove <think>...</think> tags (Qwen3 thinking mode)
            text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
            # 2. Extract content of a markdown code block, if present
            code_block_match = re.search(r'```(?:json)?\s*([\s\S]*?)```', text)
            if code_block_match:
                text = code_block_match.group(1)
                print(f" [Parse] ์ฝ”๋“œ๋ธ”๋ก์—์„œ JSON ์ถ”์ถœ")
            # 3. Find a JSON object (supports nested braces)
            json_str = self._extract_json_object(text)
            if json_str:
                print(f" [Parse] ์ถ”์ถœ๋œ JSON (์ฒ˜์Œ 200์ž):\n{json_str[:200]}...")
                # 4. Sanitize the JSON text (trailing commas, NaN/Infinity)
                json_str = self._preprocess_json(json_str)
                # 5. Attempt parsing
                params = json.loads(json_str)
                # 6. Validate and map onto the known parameter set
                result = DEFAULT_PARAMETERS.copy()
                for key, value in params.items():
                    # Normalize key casing (.q -> .Q)
                    normalized_key = self._normalize_key(key)
                    if normalized_key in result:
                        try:
                            result[normalized_key] = float(value)
                        except (ValueError, TypeError):
                            pass
                print(f" [Parse] โœ… ํŒŒ์‹ฑ ์„ฑ๊ณต! {len(params)}๊ฐœ ํŒŒ๋ผ๋ฏธํ„ฐ ์ถ”์ถœ")
                return result
            else:
                print(f" [Parse] โŒ JSON ๊ฐ์ฒด๋ฅผ ์ฐพ์„ ์ˆ˜ ์—†์Œ")
        except json.JSONDecodeError as e:
            # json_str is always bound here: json.loads only runs after the
            # assignment above succeeded.
            print(f" [Parse] โŒ JSON ํŒŒ์‹ฑ ์—๋Ÿฌ: {e}")
            if json_str:
                print(f" [Parse] ๋ฌธ์ œ ์œ„์น˜ ๊ทผ์ฒ˜: ...{json_str[max(0, e.pos-20):e.pos+20]}...")
        except Exception as e:
            print(f" [Parse] โŒ ์˜ˆ์™ธ ๋ฐœ์ƒ: {e}")
        print(f" [Parse] โš ๏ธ ๊ธฐ๋ณธ๊ฐ’์œผ๋กœ ํด๋ฐฑ")
        return DEFAULT_PARAMETERS.copy()

    def _normalize_key(self, key: str) -> str:
        """Normalize parameter key casing: a trailing '.q' becomes '.Q'."""
        if key.endswith('.q'):
            return key[:-2] + '.Q'
        return key

    def _extract_json_object(self, text: str) -> Optional[str]:
        """Return the first balanced {...} object in ``text``, or None.

        Uses brace-depth counting, so nested objects are supported; braces
        inside string values would confuse it (acceptable for numeric params).
        """
        start = text.find('{')
        if start == -1:
            return None
        depth = 0
        for i, char in enumerate(text[start:], start):
            if char == '{':
                depth += 1
            elif char == '}':
                depth -= 1
                if depth == 0:
                    return text[start:i+1]
        return None

    def _preprocess_json(self, json_str: str) -> str:
        """Sanitize near-JSON text so ``json.loads`` can parse it."""
        # Remove trailing commas before } or ]
        json_str = re.sub(r',(\s*[}\]])', r'\1', json_str)
        # Replace non-JSON literals NaN / Infinity with finite numbers
        # (the Infinity rule already covers -Infinity; the third sub is a
        # harmless belt-and-braces pass)
        json_str = re.sub(r'\bNaN\b', '0', json_str)
        json_str = re.sub(r'\bInfinity\b', '999999', json_str)
        json_str = re.sub(r'-Infinity\b', '-999999', json_str)
        return json_str

    def predict(self, audio_path: str, text_prompt: str = "") -> Dict[str, float]:
        """Predict effect parameters for an audio file + text prompt.

        Returns a dict in effect_chain format (lowercase '.q' keys). Uses the
        LLM when loaded; otherwise — or on any failure at any step — falls
        back to the keyword-preset path.
        """
        self.request_count += 1
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(f"\n{'='*60}")
        print(f"[AIEffector] ๐ŸŽต ์š”์ฒญ #{self.request_count} - {timestamp}")
        print(f"{'='*60}")
        print(f" ๐Ÿ“‚ ์˜ค๋””์˜ค ํŒŒ์ผ: {Path(audio_path).name}")
        print(f" ๐Ÿ’ฌ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ: '{text_prompt}'")
        print(f" ๐Ÿค– ๋ชจ๋ธ ์ƒํƒœ: {'AI ๋ชจ๋“œ' if self.is_loaded() else 'ํ”„๋ฆฌ์…‹ ๋ชจ๋“œ'}")
        print(f" ๐ŸŽง ์ธ์ฝ”๋”: CLAP (ํ•™์Šต๊ณผ ๋™์ผ)")
        # Use presets when no model is loaded
        if not self.is_loaded():
            print(f"\n โš ๏ธ AI ๋ชจ๋ธ ๋ฏธ๋กœ๋“œ - ํ”„๋ฆฌ์…‹ ๋ชจ๋“œ ์‚ฌ์šฉ")
            params = self._apply_preset(text_prompt)
            self._log_parameters(params)
            return self._convert_to_effect_chain_format(params)
        try:
            # 1. CLAP audio feature extraction (same as training)
            print(f"\n ๐Ÿ“Š [Step 1] CLAP ์˜ค๋””์˜ค ํŠน์ง• ์ถ”์ถœ ์ค‘...")
            audio_features = self.audio_encoder.get_audio_features(audio_path)
            # An all-zero vector is the encoder's failure signal
            if not audio_features or all(f == 0 for f in audio_features):
                print(f" โš ๏ธ ํŠน์ง• ์ถ”์ถœ ์‹คํŒจ, ํ”„๋ฆฌ์…‹์œผ๋กœ ํด๋ฐฑ")
                params = self._apply_preset(text_prompt)
                self._log_parameters(params)
                return self._convert_to_effect_chain_format(params)
            print(f" โœ… {len(audio_features)}์ฐจ์› ํŠน์ง• ์ถ”์ถœ ์™„๋ฃŒ")
            print(f" - ํŠน์ง• ๋ฒกํ„ฐ (์ฒ˜์Œ 8๊ฐœ): {[round(v, 3) for v in audio_features[:8]]}")
            # 2. Build the LLM prompt (same format as training)
            print(f"\n ๐Ÿ”ค [Step 2] LLM ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ ์ค‘ (ํ•™์Šต ํ˜•์‹)...")
            prompt = self._format_prompt(text_prompt, audio_features)
            print(f" - ํ”„๋กฌํ”„ํŠธ ๊ธธ์ด: {len(prompt)} ๋ฌธ์ž")
            # 3. Tokenize
            print(f"\n ๐Ÿ”ข [Step 3] ํ† ํฐํ™” ์ค‘...")
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                truncation=True,
                max_length=1500  # same as training
            ).to(self.device)
            # NOTE(review): with device_map="auto" the model may be sharded
            # across devices; moving inputs to self.device assumes the
            # embedding layer lives there — confirm on multi-GPU setups.
            print(f" - ์ž…๋ ฅ ํ† ํฐ ์ˆ˜: {inputs['input_ids'].shape[1]}")
            # 4. LLM generation
            print(f"\n ๐Ÿง  [Step 4] LLM ์ถ”๋ก  ์ค‘...")
            import time
            start_time = time.time()
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=500,
                    do_sample=False,
                    # NOTE(review): temperature is ignored under greedy
                    # decoding (do_sample=False) — confirm which is intended.
                    temperature=0.1,
                    pad_token_id=self.tokenizer.pad_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                )
            inference_time = time.time() - start_time
            print(f" - ์ถ”๋ก  ์‹œ๊ฐ„: {inference_time:.2f}์ดˆ")
            # 5. Decode (generated continuation only, prompt tokens skipped)
            print(f"\n ๐Ÿ“ [Step 5] ์ถœ๋ ฅ ๋””์ฝ”๋”ฉ ์ค‘...")
            generated_tokens = outputs[0][inputs['input_ids'].shape[1]:]
            output_text = self.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
            print(f" - LLM ์ถœ๋ ฅ (์ฒ˜์Œ 300์ž):\n{output_text[:300]}")
            # 6. Parse parameters out of the generated text
            print(f"\n ๐Ÿ”ง [Step 6] ํŒŒ๋ผ๋ฏธํ„ฐ ํŒŒ์‹ฑ ์ค‘...")
            params = self._parse_output(output_text)
            # 7. Log the result
            self._log_parameters(params)
            print(f"\n โœ… AI ์˜ˆ์ธก ์™„๋ฃŒ!")
            print(f"{'='*60}\n")
            # Convert to effect_chain.py format
            return self._convert_to_effect_chain_format(params)
        except Exception as e:
            print(f"\n โŒ ์˜ˆ์ธก ์‹คํŒจ: {e}")
            import traceback
            traceback.print_exc()
            print(f" โš ๏ธ ํ”„๋ฆฌ์…‹์œผ๋กœ ํด๋ฐฑ...")
            params = self._apply_preset(text_prompt)
            self._log_parameters(params)
            return self._convert_to_effect_chain_format(params)

    def _convert_to_effect_chain_format(self, params: Dict[str, float]) -> Dict[str, float]:
        """
        Convert training-data key format to the effect_chain.py format —
        mainly the Q/q case difference.
        """
        result = {}
        for key, value in params.items():
            # Q -> q (effect_chain.py uses lowercase q)
            new_key = key.replace('.Q', '.q')
            result[new_key] = value
        return result

    def _log_parameters(self, params: Dict[str, float]):
        """Pretty-print the predicted parameters (accepts either Q or q keys)."""
        print(f"\n ๐Ÿ“‹ ์˜ˆ์ธก๋œ ํŒŒ๋ผ๋ฏธํ„ฐ:")
        print(f" [EQ Peak 1]")
        print(f" - Freq: {params.get('eq_peak1.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_peak1.params.gain', 0):.2f} dB")
        print(f" - Q: {params.get('eq_peak1.params.Q', params.get('eq_peak1.params.q', 0)):.2f}")
        print(f" [EQ Peak 2]")
        print(f" - Freq: {params.get('eq_peak2.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_peak2.params.gain', 0):.2f} dB")
        print(f" - Q: {params.get('eq_peak2.params.Q', params.get('eq_peak2.params.q', 0)):.2f}")
        print(f" [Low Shelf]")
        print(f" - Freq: {params.get('eq_lowshelf.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_lowshelf.params.gain', 0):.2f} dB")
        print(f" [High Shelf]")
        print(f" - Freq: {params.get('eq_highshelf.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_highshelf.params.gain', 0):.2f} dB")
        print(f" [Effects]")
        print(f" - Distortion: {params.get('distortion_amount', 0):.3f}")
        print(f" - Delay Time: {params.get('delay.delay_time', 0):.3f}s")
        print(f" - Delay Feedback: {params.get('delay.feedback', 0):.2f}")
        print(f" - Delay Mix: {params.get('delay.mix', 0):.2f}")
        print(f" - Final Wet Mix: {params.get('final_wet_mix', 0):.2f}")