# HuggingFace Spaces page header (scraping residue): "Running on A10G"
"""
AI Effector - DiffVox LLM-based effect parameter prediction
===========================================================
V2: uses the same CLAP encoder + prompt format as training
"""
| import os | |
| import json | |
| import re | |
| import torch | |
| import numpy as np | |
| from typing import Dict, List, Optional, Any | |
| from pathlib import Path | |
| from datetime import datetime | |
| import warnings | |
| warnings.filterwarnings("ignore") | |
# Default parameter set. Used as the fallback when the model fails to load,
# and as the base dict that preset matches / LLM predictions are merged into.
DEFAULT_PARAMETERS = {
    "eq_peak1.params.freq": 1000.0,
    "eq_peak1.params.gain": 0.0,
    "eq_peak1.params.Q": 1.0,  # uppercase Q (matches the training-data keys)
    "eq_peak2.params.freq": 4000.0,
    "eq_peak2.params.gain": 0.0,
    "eq_peak2.params.Q": 1.0,
    "eq_lowshelf.params.freq": 200.0,
    "eq_lowshelf.params.gain": 0.0,
    "eq_highshelf.params.freq": 8000.0,
    "eq_highshelf.params.gain": 0.0,
    "distortion_amount": 0.0,
    "delay.delay_time": 0.02,   # seconds — TODO confirm unit against effect_chain.py
    "delay.feedback": 0.3,
    "delay.mix": 0.2,
    "final_wet_mix": 0.5
}
# Style presets (keyword-triggered fallback that works without the AI model).
# Each preset lists only the parameters it overrides on top of
# DEFAULT_PARAMETERS; a preset fires when its name appears in the prompt.
STYLE_PRESETS = {
    "warm": {
        "eq_lowshelf.params.gain": 3.0,
        "eq_highshelf.params.gain": -1.0,
        "distortion_amount": 0.05,
    },
    "bright": {
        "eq_highshelf.params.gain": 4.0,
        "eq_peak2.params.gain": 2.0,
        "eq_lowshelf.params.gain": -1.0,
    },
    "vintage": {
        "eq_lowshelf.params.gain": 2.0,
        "eq_highshelf.params.gain": -2.0,
        "distortion_amount": 0.1,
        "delay.mix": 0.15,
    },
    "modern": {
        "eq_peak1.params.gain": 2.0,
        "eq_peak2.params.gain": 3.0,
        "eq_highshelf.params.gain": 2.0,
    },
    "spacious": {
        "delay.delay_time": 0.05,
        "delay.feedback": 0.4,
        "delay.mix": 0.35,
    },
    "dry": {
        "final_wet_mix": 0.2,
        "delay.mix": 0.0,
    },
    "saturated": {
        "distortion_amount": 0.15,
        "eq_lowshelf.params.gain": 1.0,
    }
}
class CLAPAudioEncoder:
    """
    CLAP-based audio encoder (same setup as used at training time).

    Loads the ``laion/larger_clap_music`` checkpoint and mean-pools the
    512-dim CLAP audio embedding down to ``output_dim`` values. If the
    model cannot be loaded, ``get_audio_features`` returns a zero vector.
    """

    def __init__(self, output_dim: int = 64, model_name: str = "laion/larger_clap_music"):
        self.output_dim = output_dim
        self.model_name = model_name
        self.target_sr = 48000  # CLAP expects 48 kHz input audio
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None       # None until _load_model succeeds
        self.processor = None
        self._load_model()

    def _load_model(self):
        """Load the CLAP model/processor; on any failure leave them as None."""
        try:
            from transformers import ClapModel, ClapProcessor
            print(f"[CLAPEncoder] CLAP ๋ชจ๋ธ ๋ก๋ฉ ์ค: {self.model_name}")
            self.processor = ClapProcessor.from_pretrained(self.model_name)
            self.model = ClapModel.from_pretrained(self.model_name)
            self.model = self.model.to(self.device)
            self.model.eval()
            print(f"[CLAPEncoder] โ CLAP ๋ชจ๋ธ ๋ก๋ ์๋ฃ (512โ{self.output_dim} pooling)")
        except ImportError:
            print("[CLAPEncoder] โ transformers ๋ฏธ์ค์น")
            print(" pip install transformers")
        except Exception as e:
            print(f"[CLAPEncoder] โ ๋ชจ๋ธ ๋ก๋ ์คํจ: {e}")
            import traceback
            traceback.print_exc()

    def get_audio_features(self, audio_path: str) -> List[float]:
        """
        Extract an ``output_dim``-dimensional feature vector from an audio
        file (identical pipeline to training).

        Returns a zero vector when the model is unavailable or any step fails.
        """
        if self.model is None:
            print("[CLAPEncoder] ๋ชจ๋ธ์ด ๋ก๋๋์ง ์์, ๋น ํน์ง ๋ฐํ")
            return [0.0] * self.output_dim
        try:
            import librosa
            # 1. Load audio, resampled to 48 kHz mono (CLAP requirement).
            audio, sr = librosa.load(audio_path, sr=self.target_sr, mono=True)
            # 2. Prepare CLAP inputs.
            inputs = self.processor(
                audios=audio,
                sampling_rate=self.target_sr,
                return_tensors="pt",
                padding=True
            ).to(self.device)
            # 3. Run the CLAP audio tower.
            with torch.no_grad():
                outputs = self.model.get_audio_features(**inputs)
            # outputs is a [1, 512] tensor; take the single row.
            features_512 = outputs[0].cpu().numpy()
            # 4. Reduce 512 -> output_dim via mean pooling (same as training).
            features_64 = self._reduce_dimension(features_512)
            return features_64.tolist()
        except Exception as e:
            print(f"[CLAPEncoder] ํน์ง ์ถ์ถ ์คํจ: {e}")
            import traceback
            traceback.print_exc()
            return [0.0] * self.output_dim

    def _reduce_dimension(self, features: np.ndarray) -> np.ndarray:
        """Mean-pool a 1-D feature vector down to ``output_dim`` values.

        Fix: when the input is *shorter* than ``output_dim`` the previous
        code averaged empty slices (pool_size == 0), producing NaNs; such
        inputs are now zero-padded instead.
        """
        current_dim = len(features)
        if current_dim == self.output_dim:
            return features
        if current_dim < self.output_dim:
            # Zero-pad short vectors so every output slot is well-defined.
            padded = np.zeros(self.output_dim, dtype=float)
            padded[:current_dim] = features
            return padded
        # Mean pooling: e.g. 512 / 64 = 8 values per bucket. When the length
        # is not divisible, the first `remainder` buckets get one extra item.
        pool_size = current_dim // self.output_dim
        remainder = current_dim % self.output_dim
        pooled = []
        idx = 0
        for i in range(self.output_dim):
            size = pool_size + (1 if i < remainder else 0)
            pooled.append(np.mean(features[idx:idx + size]))
            idx += size
        return np.array(pooled)

    def is_loaded(self) -> bool:
        """True when the CLAP model was loaded successfully."""
        return self.model is not None
class AIEffector:
    """AI-based effect-parameter prediction (V2: same setup as training).

    Pipeline: CLAP audio features -> training-format prompt -> LoRA-tuned
    Qwen3 LLM -> JSON parameter parsing -> effect_chain key format.
    Falls back to keyword presets whenever the model is unavailable or
    any step of the pipeline fails, so ``predict`` never raises.
    """

    def __init__(
        self,
        model_repo_id: str = "heybaeheef/KU_SW_Academy",
        model_subfolder: str = "checkpoints",
        base_model_name: str = "Qwen/Qwen3-8B",
        audio_feature_dim: int = 64,
        use_huggingface: bool = True
    ):
        self.model_repo_id = model_repo_id
        self.model_subfolder = model_subfolder
        self.base_model_name = base_model_name
        self.audio_feature_dim = audio_feature_dim
        self.use_huggingface = use_huggingface
        self.model = None       # None => preset fallback mode
        self.tokenizer = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Use the CLAP encoder so inference-time features match training.
        print(f"[AIEffector] CLAP ์ค๋์ค ์ธ์ฝ๋ ์ด๊ธฐํ ์ค...")
        self.audio_encoder = CLAPAudioEncoder(output_dim=audio_feature_dim)
        # Request counter (logging only).
        self.request_count = 0
        # Try to load the LLM; on failure we stay in preset mode.
        self._load_model()

    def _load_model(self):
        """Load the (optionally 4-bit quantized) base model plus LoRA adapter.

        On any failure, resets ``model``/``tokenizer`` to None so the class
        degrades to preset-based prediction.
        """
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
            from peft import PeftModel
            print(f"[AIEffector] ๋ฒ ์ด์ค ๋ชจ๋ธ ๋ก๋ฉ ์ค: {self.base_model_name}")
            # 4-bit NF4 quantization on GPU; plain fp32 on CPU.
            if torch.cuda.is_available():
                bnb_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_compute_dtype=torch.float16,
                    bnb_4bit_use_double_quant=True
                )
                base_model = AutoModelForCausalLM.from_pretrained(
                    self.base_model_name,
                    quantization_config=bnb_config,
                    device_map="auto",
                    trust_remote_code=True
                )
            else:
                base_model = AutoModelForCausalLM.from_pretrained(
                    self.base_model_name,
                    torch_dtype=torch.float32,
                    device_map="auto",
                    trust_remote_code=True
                )
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.base_model_name,
                trust_remote_code=True
            )
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
            print(f"[AIEffector] LoRA ์ด๋ํฐ ๋ก๋ฉ ์ค...")
            if self.use_huggingface:
                print(f"[AIEffector] HuggingFace์์ LoRA ๋ก๋ฉ: {self.model_repo_id}/{self.model_subfolder}")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    self.model_repo_id,
                    subfolder=self.model_subfolder,
                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                )
            else:
                local_path = os.path.join(self.model_repo_id, self.model_subfolder)
                print(f"[AIEffector] ๋ก์ปฌ์์ LoRA ์ด๋ํฐ ๋ก๋ฉ: {local_path}")
                self.model = PeftModel.from_pretrained(
                    base_model,
                    local_path,
                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                )
            self.model.eval()
            print(f"[AIEffector] โ ๋ชจ๋ธ ๋ก๋ ์ฑ๊ณต!")
        except Exception as e:
            print(f"[AIEffector] โ ๋ชจ๋ธ ๋ก๋ ์คํจ: {e}")
            import traceback
            traceback.print_exc()
            print(f"[AIEffector] ํด๋ฐฑ ๋ชจ๋๋ก ์ ํ (ํ๋ฆฌ์ ๊ธฐ๋ฐ)")
            self.model = None
            self.tokenizer = None

    def is_loaded(self) -> bool:
        """True when the LLM (base + LoRA) was loaded successfully."""
        return self.model is not None

    def _apply_preset(self, prompt: str) -> Dict[str, float]:
        """Merge every STYLE_PRESETS entry whose name appears (case-insensitively)
        in the prompt into a copy of DEFAULT_PARAMETERS."""
        params = DEFAULT_PARAMETERS.copy()
        prompt_lower = prompt.lower()
        matched_presets = []
        for style_name, style_params in STYLE_PRESETS.items():
            if style_name in prompt_lower:
                params.update(style_params)
                matched_presets.append(style_name)
        if matched_presets:
            print(f" [Preset] ๋งค์นญ๋ ํ๋ฆฌ์ : {matched_presets}")
        return params

    def _format_prompt(self, text_prompt: str, audio_features: List[float]) -> str:
        """Build the prompt in exactly the format used during training
        (train_model.py lines 243-246). Must stay byte-identical."""
        audio_state_str = json.dumps(audio_features)
        # Do not reformat: inference prompt must match the training prompt.
        prompt = f"""Task: Convert text to audio parameters.
Audio: {audio_state_str}
Text: {text_prompt}
Parameters:"""
        return prompt

    def _parse_output(self, output_text: str) -> Dict[str, float]:
        """Extract effect parameters from raw LLM output.

        Strips Qwen3 <think> blocks and markdown fences, pulls out the first
        balanced JSON object, sanitizes it, and maps recognized keys onto a
        copy of DEFAULT_PARAMETERS. Falls back to the defaults on any error.
        """
        print(f" [Parse] Raw output ๊ธธ์ด: {len(output_text)} ๋ฌธ์")
        # Fix: pre-bind json_str so the JSONDecodeError handler below can
        # never hit a NameError if the flow ever changes.
        json_str = None
        try:
            text = output_text
            # 1. Drop <think>...</think> tags (Qwen3 thinking mode).
            text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
            # 2. Prefer the contents of a markdown code block, if present.
            code_block_match = re.search(r'```(?:json)?\s*([\s\S]*?)```', text)
            if code_block_match:
                text = code_block_match.group(1)
                print(f" [Parse] ์ฝ๋๋ธ๋ก์์ JSON ์ถ์ถ")
            # 3. Locate the first balanced JSON object (handles nesting).
            json_str = self._extract_json_object(text)
            if json_str:
                print(f" [Parse] ์ถ์ถ๋ JSON (์ฒ์ 200์):\n{json_str[:200]}...")
                # 4. Sanitize (trailing commas, NaN/Infinity).
                json_str = self._preprocess_json(json_str)
                # 5. Parse.
                params = json.loads(json_str)
                # 6. Keep only known keys, coercing values to float.
                result = DEFAULT_PARAMETERS.copy()
                for key, value in params.items():
                    # Normalize key casing (.q vs .Q).
                    normalized_key = self._normalize_key(key)
                    if normalized_key in result:
                        try:
                            result[normalized_key] = float(value)
                        except (ValueError, TypeError):
                            pass  # non-numeric value: keep the default
                print(f" [Parse] โ ํ์ฑ ์ฑ๊ณต! {len(params)}๊ฐ ํ๋ผ๋ฏธํฐ ์ถ์ถ")
                return result
            else:
                print(f" [Parse] โ JSON ๊ฐ์ฒด๋ฅผ ์ฐพ์ ์ ์์")
        except json.JSONDecodeError as e:
            print(f" [Parse] โ JSON ํ์ฑ ์๋ฌ: {e}")
            if json_str:
                print(f" [Parse] ๋ฌธ์ ์์น ๊ทผ์ฒ: ...{json_str[max(0, e.pos-20):e.pos+20]}...")
        except Exception as e:
            print(f" [Parse] โ ์์ธ ๋ฐ์: {e}")
        print(f" [Parse] โ ๏ธ ๊ธฐ๋ณธ๊ฐ์ผ๋ก ํด๋ฐฑ")
        return DEFAULT_PARAMETERS.copy()

    def _normalize_key(self, key: str) -> str:
        """Normalize a parameter key: trailing '.q' -> '.Q' (training data
        uses uppercase Q)."""
        if key.endswith('.q'):
            return key[:-2] + '.Q'
        return key

    def _extract_json_object(self, text: str) -> Optional[str]:
        """Return the first balanced {...} object in `text`, or None.
        Tracks brace depth so nested objects are captured whole."""
        start = text.find('{')
        if start == -1:
            return None
        depth = 0
        for i, char in enumerate(text[start:], start):
            if char == '{':
                depth += 1
            elif char == '}':
                depth -= 1
                if depth == 0:
                    return text[start:i+1]
        return None  # unbalanced braces

    def _preprocess_json(self, json_str: str) -> str:
        """Make near-JSON parseable: strip trailing commas, replace
        NaN/Infinity tokens with finite numbers."""
        # Remove trailing commas before } or ].
        json_str = re.sub(r',(\s*[}\]])', r'\1', json_str)
        # NaN / Infinity are invalid in strict JSON.
        json_str = re.sub(r'\bNaN\b', '0', json_str)
        json_str = re.sub(r'\bInfinity\b', '999999', json_str)
        json_str = re.sub(r'-Infinity\b', '-999999', json_str)
        return json_str

    def predict(self, audio_path: str, text_prompt: str = "") -> Dict[str, float]:
        """Predict effect parameters for an audio file and text prompt.

        Returns a dict in effect_chain format (lowercase '.q' keys).
        Never raises: every failure path falls back to preset matching.
        """
        self.request_count += 1
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print(f"\n{'='*60}")
        print(f"[AIEffector] ๐ต ์์ฒญ #{self.request_count} - {timestamp}")
        print(f"{'='*60}")
        print(f" ๐ ์ค๋์ค ํ์ผ: {Path(audio_path).name}")
        print(f" ๐ฌ ํ ์คํธ ํ๋กฌํํธ: '{text_prompt}'")
        print(f" ๐ค ๋ชจ๋ธ ์ํ: {'AI ๋ชจ๋' if self.is_loaded() else 'ํ๋ฆฌ์ ๋ชจ๋'}")
        print(f" ๐ง ์ธ์ฝ๋: CLAP (ํ์ต๊ณผ ๋์ผ)")
        # No LLM available -> preset mode.
        if not self.is_loaded():
            print(f"\n โ ๏ธ AI ๋ชจ๋ธ ๋ฏธ๋ก๋ - ํ๋ฆฌ์ ๋ชจ๋ ์ฌ์ฉ")
            params = self._apply_preset(text_prompt)
            self._log_parameters(params)
            return self._convert_to_effect_chain_format(params)
        try:
            # 1. CLAP audio features (same encoder as training).
            print(f"\n ๐ [Step 1] CLAP ์ค๋์ค ํน์ง ์ถ์ถ ์ค...")
            audio_features = self.audio_encoder.get_audio_features(audio_path)
            if not audio_features or all(f == 0 for f in audio_features):
                print(f" โ ๏ธ ํน์ง ์ถ์ถ ์คํจ, ํ๋ฆฌ์ ์ผ๋ก ํด๋ฐฑ")
                params = self._apply_preset(text_prompt)
                self._log_parameters(params)
                return self._convert_to_effect_chain_format(params)
            print(f" โ {len(audio_features)}์ฐจ์ ํน์ง ์ถ์ถ ์๋ฃ")
            print(f" - ํน์ง ๋ฒกํฐ (์ฒ์ 8๊ฐ): {[round(v, 3) for v in audio_features[:8]]}")
            # 2. Build the LLM prompt (training format).
            print(f"\n ๐ค [Step 2] LLM ํ๋กฌํํธ ์์ฑ ์ค (ํ์ต ํ์)...")
            prompt = self._format_prompt(text_prompt, audio_features)
            print(f" - ํ๋กฌํํธ ๊ธธ์ด: {len(prompt)} ๋ฌธ์")
            # 3. Tokenize.
            print(f"\n ๐ข [Step 3] ํ ํฐํ ์ค...")
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                truncation=True,
                max_length=1500  # same limit as training
            ).to(self.device)
            print(f" - ์ ๋ ฅ ํ ํฐ ์: {inputs['input_ids'].shape[1]}")
            # 4. Generate.
            print(f"\n ๐ง [Step 4] LLM ์ถ๋ก ์ค...")
            import time
            start_time = time.time()
            with torch.no_grad():
                # Fix: `temperature` removed — it is ignored (with a warning)
                # when do_sample=False, since greedy decoding is deterministic.
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=500,
                    do_sample=False,
                    pad_token_id=self.tokenizer.pad_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                )
            inference_time = time.time() - start_time
            print(f" - ์ถ๋ก ์๊ฐ: {inference_time:.2f}์ด")
            # 5. Decode only the newly generated tokens (skip the prompt).
            print(f"\n ๐ [Step 5] ์ถ๋ ฅ ๋์ฝ๋ฉ ์ค...")
            generated_tokens = outputs[0][inputs['input_ids'].shape[1]:]
            output_text = self.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
            print(f" - LLM ์ถ๋ ฅ (์ฒ์ 300์):\n{output_text[:300]}")
            # 6. Parse parameters out of the LLM output.
            print(f"\n ๐ง [Step 6] ํ๋ผ๋ฏธํฐ ํ์ฑ ์ค...")
            params = self._parse_output(output_text)
            # 7. Log the result.
            self._log_parameters(params)
            print(f"\n โ AI ์์ธก ์๋ฃ!")
            print(f"{'='*60}\n")
            # Convert to the effect_chain.py key format.
            return self._convert_to_effect_chain_format(params)
        except Exception as e:
            print(f"\n โ ์์ธก ์คํจ: {e}")
            import traceback
            traceback.print_exc()
            print(f" โ ๏ธ ํ๋ฆฌ์ ์ผ๋ก ํด๋ฐฑ...")
            params = self._apply_preset(text_prompt)
            self._log_parameters(params)
            return self._convert_to_effect_chain_format(params)

    def _convert_to_effect_chain_format(self, params: Dict[str, float]) -> Dict[str, float]:
        """
        Convert training-data keys to effect_chain.py keys.
        Only the Q/q casing differs ('.Q' -> '.q'); values pass through.
        """
        result = {}
        for key, value in params.items():
            # effect_chain.py uses lowercase q.
            new_key = key.replace('.Q', '.q')
            result[new_key] = value
        return result

    def _log_parameters(self, params: Dict[str, float]):
        """Pretty-print the predicted parameter set (accepts either Q or q keys)."""
        print(f"\n ๐ ์์ธก๋ ํ๋ผ๋ฏธํฐ:")
        print(f" [EQ Peak 1]")
        print(f" - Freq: {params.get('eq_peak1.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_peak1.params.gain', 0):.2f} dB")
        print(f" - Q: {params.get('eq_peak1.params.Q', params.get('eq_peak1.params.q', 0)):.2f}")
        print(f" [EQ Peak 2]")
        print(f" - Freq: {params.get('eq_peak2.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_peak2.params.gain', 0):.2f} dB")
        print(f" - Q: {params.get('eq_peak2.params.Q', params.get('eq_peak2.params.q', 0)):.2f}")
        print(f" [Low Shelf]")
        print(f" - Freq: {params.get('eq_lowshelf.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_lowshelf.params.gain', 0):.2f} dB")
        print(f" [High Shelf]")
        print(f" - Freq: {params.get('eq_highshelf.params.freq', 0):.1f} Hz")
        print(f" - Gain: {params.get('eq_highshelf.params.gain', 0):.2f} dB")
        print(f" [Effects]")
        print(f" - Distortion: {params.get('distortion_amount', 0):.3f}")
        print(f" - Delay Time: {params.get('delay.delay_time', 0):.3f}s")
        print(f" - Delay Feedback: {params.get('delay.feedback', 0):.2f}")
        print(f" - Delay Mix: {params.get('delay.mix', 0):.2f}")
        print(f" - Final Wet Mix: {params.get('final_wet_mix', 0):.2f}")