heybaeheef committed on
Commit
4336a6e
·
verified ·
1 Parent(s): 3bfa04b

Delete models/ai_effector.py

Browse files
Files changed (1) hide show
  1. models/ai_effector.py +0 -482
models/ai_effector.py DELETED
@@ -1,482 +0,0 @@
1
- """
2
- AI Effector - DiffVox LLM 기반 이펙트 파라미터 예측
3
- ===================================================
4
- 상세 로그 버전
5
- """
6
-
7
- import os
8
- import json
9
- import re
10
- import torch
11
- import numpy as np
12
- from typing import Dict, List, Optional, Any
13
- from pathlib import Path
14
- from datetime import datetime
15
- import warnings
16
-
17
- warnings.filterwarnings("ignore")
18
-
19
- # 기본 파라미터 (모델 로드 실패 시 사용)
20
- DEFAULT_PARAMETERS = {
21
- "eq_peak1.params.freq": 1000.0,
22
- "eq_peak1.params.gain": 0.0,
23
- "eq_peak1.params.q": 1.0,
24
- "eq_peak2.params.freq": 4000.0,
25
- "eq_peak2.params.gain": 0.0,
26
- "eq_peak2.params.q": 1.0,
27
- "eq_lowshelf.params.freq": 200.0,
28
- "eq_lowshelf.params.gain": 0.0,
29
- "eq_lowshelf.params.q": 0.707,
30
- "eq_highshelf.params.freq": 8000.0,
31
- "eq_highshelf.params.gain": 0.0,
32
- "eq_highshelf.params.q": 0.707,
33
- "distortion_amount": 0.0,
34
- "delay.delay_time": 0.02,
35
- "delay.feedback": 0.3,
36
- "delay.mix": 0.2,
37
- "final_wet_mix": 0.5
38
- }
39
-
40
- # ์Šคํƒ€์ผ ํ”„๋ฆฌ์…‹ (AI ์—†์ด๋„ ์ž‘๋™)
41
- STYLE_PRESETS = {
42
- "warm": {
43
- "eq_lowshelf.params.gain": 3.0,
44
- "eq_highshelf.params.gain": -1.0,
45
- "distortion_amount": 0.05,
46
- },
47
- "bright": {
48
- "eq_highshelf.params.gain": 4.0,
49
- "eq_peak2.params.gain": 2.0,
50
- "eq_lowshelf.params.gain": -1.0,
51
- },
52
- "vintage": {
53
- "eq_lowshelf.params.gain": 2.0,
54
- "eq_highshelf.params.gain": -2.0,
55
- "distortion_amount": 0.1,
56
- "delay.mix": 0.15,
57
- },
58
- "modern": {
59
- "eq_peak1.params.gain": 2.0,
60
- "eq_peak2.params.gain": 3.0,
61
- "eq_highshelf.params.gain": 2.0,
62
- },
63
- "spacious": {
64
- "delay.delay_time": 0.05,
65
- "delay.feedback": 0.4,
66
- "delay.mix": 0.35,
67
- },
68
- "dry": {
69
- "final_wet_mix": 0.2,
70
- "delay.mix": 0.0,
71
- },
72
- "saturated": {
73
- "distortion_amount": 0.15,
74
- "eq_lowshelf.params.gain": 1.0,
75
- }
76
- }
77
-
78
-
79
- class AudioEncoder:
80
- """๊ฐ„์†Œํ™”๋œ ์˜ค๋””์˜ค ์ธ์ฝ”๋” (CLAP ๋Œ€์ฒด)"""
81
-
82
- def __init__(self, output_dim: int = 64):
83
- self.output_dim = output_dim
84
- self.sr = 44100
85
-
86
- def get_audio_features(self, audio_path: str) -> Dict:
87
- """์˜ค๋””์˜ค์—์„œ ํŠน์ง• ์ถ”์ถœ (์ƒ์„ธ ์ •๋ณด ํฌํ•จ)"""
88
- try:
89
- import librosa
90
-
91
- y, sr = librosa.load(audio_path, sr=self.sr, duration=5.0)
92
-
93
- # ๊ธฐ๋ณธ ์˜ค๋””์˜ค ์ •๋ณด
94
- duration = len(y) / sr
95
-
96
- # ๊ธฐ๋ณธ ํŠน์ง• ์ถ”์ถœ
97
- features = []
98
- feature_details = {}
99
-
100
- # MFCC (20๊ฐœ)
101
- mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20)
102
- mfcc_mean = np.mean(mfcc, axis=1).tolist()
103
- features.extend(mfcc_mean)
104
- feature_details["mfcc_mean"] = [round(v, 4) for v in mfcc_mean[:5]] # ์ฒ˜์Œ 5๊ฐœ๋งŒ
105
-
106
- # Spectral features
107
- spectral_centroid = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
108
- spectral_bandwidth = np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr))
109
- spectral_rolloff = np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr))
110
-
111
- features.extend([spectral_centroid / 10000, spectral_bandwidth / 10000, spectral_rolloff / 10000])
112
- feature_details["spectral_centroid"] = round(spectral_centroid, 2)
113
- feature_details["spectral_bandwidth"] = round(spectral_bandwidth, 2)
114
- feature_details["spectral_rolloff"] = round(spectral_rolloff, 2)
115
-
116
- # RMS energy
117
- rms = np.mean(librosa.feature.rms(y=y))
118
- features.append(float(rms))
119
- feature_details["rms_energy"] = round(float(rms), 4)
120
-
121
- # Zero crossing rate
122
- zcr = np.mean(librosa.feature.zero_crossing_rate(y))
123
- features.append(float(zcr))
124
- feature_details["zero_crossing_rate"] = round(float(zcr), 4)
125
-
126
- # Chroma (12๊ฐœ)
127
- chroma = librosa.feature.chroma_stft(y=y, sr=sr)
128
- chroma_mean = np.mean(chroma, axis=1).tolist()
129
- features.extend(chroma_mean)
130
- feature_details["chroma_mean"] = [round(v, 4) for v in chroma_mean[:5]] # ์ฒ˜์Œ 5๊ฐœ๋งŒ
131
-
132
- # ํ”ผ์น˜ ์ถ”์ •
133
- pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
134
- pitch_values = []
135
- for t in range(pitches.shape[1]):
136
- index = magnitudes[:, t].argmax()
137
- pitch = pitches[index, t]
138
- if pitch > 0:
139
- pitch_values.append(pitch)
140
- median_pitch = np.median(pitch_values) if pitch_values else 0
141
- feature_details["estimated_pitch_hz"] = round(float(median_pitch), 2)
142
-
143
- # ์Œ์ƒ‰ ๋ถ„์„
144
- if spectral_centroid > 3000:
145
- brightness = "bright"
146
- elif spectral_centroid > 1500:
147
- brightness = "neutral"
148
- else:
149
- brightness = "dark"
150
- feature_details["brightness"] = brightness
151
-
152
- # ์—๋„ˆ์ง€ ๋ถ„์„
153
- if rms > 0.1:
154
- intensity = "powerful"
155
- elif rms > 0.03:
156
- intensity = "moderate"
157
- else:
158
- intensity = "soft"
159
- feature_details["intensity"] = intensity
160
-
161
- # Pad or truncate to output_dim
162
- if len(features) < self.output_dim:
163
- features.extend([0.0] * (self.output_dim - len(features)))
164
- else:
165
- features = features[:self.output_dim]
166
-
167
- return {
168
- "features": features,
169
- "details": feature_details,
170
- "duration_sec": round(duration, 2),
171
- "sample_rate": sr
172
- }
173
-
174
- except Exception as e:
175
- print(f"[AudioEncoder] โŒ ํŠน์ง• ์ถ”์ถœ ์‹คํŒจ: {e}")
176
- return {
177
- "features": [0.0] * self.output_dim,
178
- "details": {"error": str(e)},
179
- "duration_sec": 0,
180
- "sample_rate": self.sr
181
- }
182
-
183
-
184
- class AIEffector:
185
- """AI 기반 이펙터 파라미터 예측"""
186
-
187
- def __init__(
188
- self,
189
- model_repo_id: str = "heybaeheef/KU_SW_Academy",
190
- model_subfolder: str = "checkpoints",
191
- base_model_name: str = "Qwen/Qwen3-8B",
192
- audio_feature_dim: int = 64,
193
- use_huggingface: bool = True
194
- ):
195
- self.model_repo_id = model_repo_id
196
- self.model_subfolder = model_subfolder
197
- self.base_model_name = base_model_name
198
- self.audio_feature_dim = audio_feature_dim
199
- self.use_huggingface = use_huggingface
200
-
201
- self.model = None
202
- self.tokenizer = None
203
- self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
204
-
205
- # ์˜ค๋””์˜ค ์ธ์ฝ”๋”
206
- self.audio_encoder = AudioEncoder(output_dim=audio_feature_dim)
207
-
208
- # ์š”์ฒญ ์นด์šดํ„ฐ
209
- self.request_count = 0
210
-
211
- # ๋ชจ๋ธ ๋กœ๋“œ ์‹œ๋„
212
- self._load_model()
213
-
214
- def _load_model(self):
215
- """๋ชจ๋ธ ๋กœ๋“œ"""
216
- try:
217
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
218
- from peft import PeftModel
219
-
220
- print(f"[AIEffector] ๋ชจ๋ธ ๋กœ๋”ฉ ์‹œ์ž‘...")
221
- print(f" - Base Model: {self.base_model_name}")
222
- print(f" - Adapter Repo: {self.model_repo_id}")
223
- print(f" - Adapter Subfolder: {self.model_subfolder}")
224
-
225
- # ํ† ํฌ๋‚˜์ด์ € ๋กœ๋“œ
226
- self.tokenizer = AutoTokenizer.from_pretrained(
227
- self.base_model_name,
228
- trust_remote_code=True
229
- )
230
- if self.tokenizer.pad_token is None:
231
- self.tokenizer.pad_token = self.tokenizer.eos_token
232
-
233
- # 4bit ์–‘์žํ™” ์„ค์ • (๋ฉ”๋ชจ๋ฆฌ ์ ˆ์•ฝ)
234
- quantization_config = None
235
- if torch.cuda.is_available():
236
- try:
237
- quantization_config = BitsAndBytesConfig(
238
- load_in_4bit=True,
239
- bnb_4bit_compute_dtype=torch.float16,
240
- bnb_4bit_use_double_quant=True,
241
- bnb_4bit_quant_type="nf4"
242
- )
243
- print(f" - 4bit ์–‘์žํ™” ํ™œ์„ฑํ™”")
244
- except Exception as e:
245
- print(f" - 4bit ์–‘์žํ™” ์‹คํŒจ, ๊ธฐ๋ณธ ๋กœ๋“œ: {e}")
246
-
247
- # ๋ฒ ์ด์Šค ๋ชจ๋ธ ๋กœ๋“œ
248
- base_model = AutoModelForCausalLM.from_pretrained(
249
- self.base_model_name,
250
- quantization_config=quantization_config,
251
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
252
- device_map="auto" if torch.cuda.is_available() else None,
253
- trust_remote_code=True,
254
- low_cpu_mem_usage=True
255
- )
256
-
257
- # LoRA ์–ด๋Œ‘ํ„ฐ ๋กœ๋“œ (subfolder ํŒŒ๋ผ๋ฏธํ„ฐ ์‚ฌ์šฉ!)
258
- if self.use_huggingface:
259
- print(f"[AIEffector] Hugging Face์—์„œ LoRA ์–ด๋Œ‘ํ„ฐ ๋กœ๋”ฉ...")
260
- self.model = PeftModel.from_pretrained(
261
- base_model,
262
- self.model_repo_id,
263
- subfolder=self.model_subfolder,
264
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
265
- )
266
- else:
267
- local_path = os.path.join(self.model_repo_id, self.model_subfolder)
268
- print(f"[AIEffector] ๋กœ์ปฌ์—์„œ LoRA ์–ด๋Œ‘ํ„ฐ ๋กœ๋”ฉ: {local_path}")
269
- self.model = PeftModel.from_pretrained(
270
- base_model,
271
- local_path,
272
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
273
- )
274
-
275
- self.model.eval()
276
- print(f"[AIEffector] โœ… ๋ชจ๋ธ ๋กœ๋“œ ์„ฑ๊ณต!")
277
-
278
- except Exception as e:
279
- print(f"[AIEffector] โŒ ๋ชจ๋ธ ๋กœ๋“œ ์‹คํŒจ: {e}")
280
- print(f"[AIEffector] ํด๋ฐฑ ๋ชจ๋“œ๋กœ ์ „ํ™˜ (ํ”„๋ฆฌ์…‹ ๊ธฐ๋ฐ˜)")
281
- self.model = None
282
- self.tokenizer = None
283
-
284
- def is_loaded(self) -> bool:
285
- """๋ชจ๋ธ ๋กœ๋“œ ์—ฌ๋ถ€"""
286
- return self.model is not None
287
-
288
- def _apply_preset(self, prompt: str) -> Dict[str, float]:
289
- """ํ”„๋กฌํ”„ํŠธ์—์„œ ํ”„๋ฆฌ์…‹ ๋งค์นญ"""
290
- params = DEFAULT_PARAMETERS.copy()
291
- prompt_lower = prompt.lower()
292
-
293
- matched_presets = []
294
- for style_name, style_params in STYLE_PRESETS.items():
295
- if style_name in prompt_lower:
296
- params.update(style_params)
297
- matched_presets.append(style_name)
298
-
299
- if matched_presets:
300
- print(f" [Preset] ๋งค์นญ๋œ ํ”„๋ฆฌ์…‹: {matched_presets}")
301
-
302
- return params
303
-
304
- def _format_prompt(self, text_prompt: str, audio_features: List[float]) -> str:
305
- """LLM ์ž…๋ ฅ ํ”„๋กฌํ”„ํŠธ ํฌ๋งทํŒ…"""
306
- audio_summary = ", ".join([f"{v:.3f}" for v in audio_features[:8]])
307
-
308
- prompt = f"""You are an audio effect parameter predictor.
309
-
310
- Input:
311
- - Text description: {text_prompt}
312
- - Audio features (first 8): [{audio_summary}]
313
-
314
- Output the effect parameters as JSON:
315
- ```json
316
- {{
317
- "eq_peak1.params.freq": <float>,
318
- "eq_peak1.params.gain": <float>,
319
- "eq_peak1.params.q": <float>,
320
- "eq_peak2.params.freq": <float>,
321
- "eq_peak2.params.gain": <float>,
322
- "eq_peak2.params.q": <float>,
323
- "eq_lowshelf.params.freq": <float>,
324
- "eq_lowshelf.params.gain": <float>,
325
- "eq_lowshelf.params.q": <float>,
326
- "eq_highshelf.params.freq": <float>,
327
- "eq_highshelf.params.gain": <float>,
328
- "eq_highshelf.params.q": <float>,
329
- "distortion_amount": <float>,
330
- "delay.delay_time": <float>,
331
- "delay.feedback": <float>,
332
- "delay.mix": <float>,
333
- "final_wet_mix": <float>
334
- }}
335
- ```
336
-
337
- JSON output:"""
338
-
339
- return prompt
340
-
341
- def _parse_output(self, output_text: str) -> Dict[str, float]:
342
- """LLM ์ถœ๋ ฅ์—์„œ ํŒŒ๋ผ๋ฏธํ„ฐ ์ถ”์ถœ"""
343
- try:
344
- json_match = re.search(r'\{[^{}]*\}', output_text, re.DOTALL)
345
- if json_match:
346
- params = json.loads(json_match.group())
347
-
348
- result = DEFAULT_PARAMETERS.copy()
349
- for key, value in params.items():
350
- if key in result and isinstance(value, (int, float)):
351
- result[key] = float(value)
352
-
353
- return result
354
- except Exception as e:
355
- print(f" [Parse] โŒ ์ถœ๋ ฅ ํŒŒ์‹ฑ ์‹คํŒจ: {e}")
356
-
357
- return DEFAULT_PARAMETERS.copy()
358
-
359
- def predict(self, audio_path: str, text_prompt: str = "") -> Dict[str, float]:
360
- """ํŒŒ๋ผ๋ฏธํ„ฐ ์˜ˆ์ธก (์ƒ์„ธ ๋กœ๊ทธ ํฌํ•จ)"""
361
-
362
- self.request_count += 1
363
- timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
364
-
365
- print(f"\n{'='*60}")
366
- print(f"[AIEffector] ๐ŸŽต ์š”์ฒญ #{self.request_count} - {timestamp}")
367
- print(f"{'='*60}")
368
- print(f" ๐Ÿ“‚ ์˜ค๋””์˜ค ํŒŒ์ผ: {Path(audio_path).name}")
369
- print(f" ๐Ÿ’ฌ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ: '{text_prompt}'")
370
- print(f" ๐Ÿค– ๋ชจ๋ธ ์ƒํƒœ: {'AI ๋ชจ๋“œ' if self.is_loaded() else 'ํ”„๋ฆฌ์…‹ ๋ชจ๋“œ'}")
371
-
372
- # ๋ชจ๋ธ์ด ์—†์œผ๋ฉด ํ”„๋ฆฌ์…‹ ์‚ฌ์šฉ
373
- if not self.is_loaded():
374
- print(f"\n โš ๏ธ AI ๋ชจ๋ธ ๋ฏธ๋กœ๋“œ - ํ”„๋ฆฌ์…‹ ๋ชจ๋“œ ์‚ฌ์šฉ")
375
- params = self._apply_preset(text_prompt)
376
- self._log_parameters(params)
377
- return params
378
-
379
- try:
380
- # 1. ์˜ค๋””์˜ค ํŠน์ง• ์ถ”์ถœ
381
- print(f"\n ๐Ÿ“Š [Step 1] ์˜ค๋””์˜ค ํŠน์ง• ์ถ”์ถœ ์ค‘...")
382
- audio_result = self.audio_encoder.get_audio_features(audio_path)
383
- audio_features = audio_result["features"]
384
- audio_details = audio_result["details"]
385
-
386
- print(f" - ์˜ค๋””์˜ค ๊ธธ์ด: {audio_result['duration_sec']}์ดˆ")
387
- print(f" - ์ƒ˜ํ”Œ๋ ˆ์ดํŠธ: {audio_result['sample_rate']}Hz")
388
- print(f" - ์ถ”์ • ํ”ผ์น˜: {audio_details.get('estimated_pitch_hz', 'N/A')}Hz")
389
- print(f" - ๋ฐ๊ธฐ: {audio_details.get('brightness', 'N/A')}")
390
- print(f" - ๊ฐ•๋„: {audio_details.get('intensity', 'N/A')}")
391
- print(f" - Spectral Centroid: {audio_details.get('spectral_centroid', 'N/A')}")
392
- print(f" - RMS Energy: {audio_details.get('rms_energy', 'N/A')}")
393
- print(f" - ํŠน์ง• ๋ฒกํ„ฐ (์ฒ˜์Œ 8๊ฐœ): {[round(v, 3) for v in audio_features[:8]]}")
394
-
395
- # 2. LLM ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ
396
- print(f"\n ๐Ÿ”ค [Step 2] LLM ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ ์ค‘...")
397
- prompt = self._format_prompt(text_prompt, audio_features)
398
- print(f" - ํ”„๋กฌํ”„ํŠธ ๊ธธ์ด: {len(prompt)} ๋ฌธ์ž")
399
-
400
- # 3. ํ† ํฐํ™”
401
- print(f"\n ๐Ÿ”ข [Step 3] ํ† ํฐํ™” ์ค‘...")
402
- inputs = self.tokenizer(
403
- prompt,
404
- return_tensors="pt",
405
- truncation=True,
406
- max_length=1024
407
- ).to(self.device)
408
- print(f" - ์ž…๋ ฅ ํ† ํฐ ์ˆ˜: {inputs['input_ids'].shape[1]}")
409
-
410
- # 4. LLM ์ƒ์„ฑ
411
- print(f"\n ๐Ÿง  [Step 4] LLM ์ถ”๋ก  ์ค‘...")
412
- import time
413
- start_time = time.time()
414
-
415
- with torch.no_grad():
416
- outputs = self.model.generate(
417
- **inputs,
418
- max_new_tokens=256,
419
- do_sample=False,
420
- temperature=0.1,
421
- pad_token_id=self.tokenizer.pad_token_id
422
- )
423
-
424
- inference_time = time.time() - start_time
425
- print(f" - ์ถ”๋ก  ์‹œ๊ฐ„: {inference_time:.2f}์ดˆ")
426
- print(f" - ์ถœ๋ ฅ ํ† ํฐ ์ˆ˜: {outputs.shape[1]}")
427
-
428
- # 5. ๋””์ฝ”๋”ฉ
429
- print(f"\n ๐Ÿ“ [Step 5] ์ถœ๋ ฅ ๋””์ฝ”๋”ฉ ์ค‘...")
430
- output_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
431
-
432
- # JSON ๋ถ€๋ถ„๋งŒ ์ถ”์ถœํ•ด์„œ ๋กœ๊ทธ
433
- json_match = re.search(r'\{[^{}]*\}', output_text, re.DOTALL)
434
- if json_match:
435
- print(f" - LLM ์ถœ๋ ฅ JSON:\n{json_match.group()}")
436
-
437
- # 6. ํŒŒ์‹ฑ
438
- print(f"\n ๐Ÿ”ง [Step 6] ํŒŒ๋ผ๋ฏธํ„ฐ ํŒŒ์‹ฑ ์ค‘...")
439
- params = self._parse_output(output_text)
440
-
441
- # 7. ๊ฒฐ๊ณผ ๋กœ๊น…
442
- self._log_parameters(params)
443
-
444
- print(f"\n โœ… AI ์˜ˆ์ธก ์™„๋ฃŒ!")
445
- print(f"{'='*60}\n")
446
-
447
- return params
448
-
449
- except Exception as e:
450
- print(f"\n โŒ ์˜ˆ์ธก ์‹คํŒจ: {e}")
451
- print(f" โš ๏ธ ํ”„๋ฆฌ์…‹์œผ๋กœ ํด๋ฐฑ...")
452
- params = self._apply_preset(text_prompt)
453
- self._log_parameters(params)
454
- return params
455
-
456
- def _log_parameters(self, params: Dict[str, float]):
457
- """์˜ˆ์ธก๋œ ํŒŒ๋ผ๋ฏธํ„ฐ ๋กœ๊น…"""
458
- print(f"\n ๐Ÿ“‹ ์˜ˆ์ธก๋œ ํŒŒ๋ผ๋ฏธํ„ฐ:")
459
- print(f" [EQ Peak 1]")
460
- print(f" - Freq: {params.get('eq_peak1.params.freq', 0):.1f} Hz")
461
- print(f" - Gain: {params.get('eq_peak1.params.gain', 0):.2f} dB")
462
- print(f" - Q: {params.get('eq_peak1.params.q', 0):.2f}")
463
-
464
- print(f" [EQ Peak 2]")
465
- print(f" - Freq: {params.get('eq_peak2.params.freq', 0):.1f} Hz")
466
- print(f" - Gain: {params.get('eq_peak2.params.gain', 0):.2f} dB")
467
- print(f" - Q: {params.get('eq_peak2.params.q', 0):.2f}")
468
-
469
- print(f" [Low Shelf]")
470
- print(f" - Freq: {params.get('eq_lowshelf.params.freq', 0):.1f} Hz")
471
- print(f" - Gain: {params.get('eq_lowshelf.params.gain', 0):.2f} dB")
472
-
473
- print(f" [High Shelf]")
474
- print(f" - Freq: {params.get('eq_highshelf.params.freq', 0):.1f} Hz")
475
- print(f" - Gain: {params.get('eq_highshelf.params.gain', 0):.2f} dB")
476
-
477
- print(f" [Effects]")
478
- print(f" - Distortion: {params.get('distortion_amount', 0):.3f}")
479
- print(f" - Delay Time: {params.get('delay.delay_time', 0):.3f}s")
480
- print(f" - Delay Feedback: {params.get('delay.feedback', 0):.2f}")
481
- print(f" - Delay Mix: {params.get('delay.mix', 0):.2f}")
482
- print(f" - Final Wet Mix: {params.get('final_wet_mix', 0):.2f}")