yoyolicoris committed on
Commit
32e9732
·
1 Parent(s): 85830b2

feat: visualisation

Browse files
Files changed (1) hide show
  1. app.py +54 -7
app.py CHANGED
@@ -155,6 +155,7 @@ to_fx_state_dict = lambda x: {
155
  k: v[0] if ndim_dict[k] == 0 else v for k, v in vec2dict(x).items()
156
  }
157
 
 
158
  meter = pyln.Meter(44100)
159
 
160
 
@@ -224,6 +225,9 @@ def inference(
224
  lr,
225
  progress=gr.Progress(track_tqdm=True),
226
  ):
 
 
 
227
  device = Path("DEVICE.txt").read_text()
228
  if method == "Mean":
229
  return gaussian_params_dict[dataset][0].to(device)
@@ -474,7 +478,7 @@ def plot_t60(fx):
474
  def vec2fx(x):
475
  fx = deepcopy(global_fx)
476
  fx.load_state_dict(vec2dict(x), strict=False)
477
- fx.apply(partial(clip_delay_eq_Q, Q=0.707))
478
  return fx
479
 
480
 
@@ -602,20 +606,57 @@ with gr.Blocks() as demo:
602
  interactive=True,
603
  )
604
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
605
  process_button.click(
606
  chain_functions(
607
- # lambda audio, ratio, x, *all_s: (
608
- # audio,
609
- # ratio,
610
- # # assign_fx_params(vec2fx(x), *all_s),
611
- # ),
612
  lambda audio, approx, ratio, *args: (
613
  audio,
614
  approx,
615
  ratio,
616
  inference(audio, *args),
617
  ),
618
- lambda audio, approx, ratio, vec: (*render(audio, approx, ratio, vec), vec),
 
 
 
 
 
 
 
 
 
 
 
619
  ),
620
  inputs=[
621
  audio_input,
@@ -637,6 +678,12 @@ with gr.Blocks() as demo:
637
  direct_output,
638
  wet_output,
639
  fx_params,
 
 
 
 
 
 
640
  ],
641
  )
642
 
 
155
  k: v[0] if ndim_dict[k] == 0 else v for k, v in vec2dict(x).items()
156
  }
157
 
158
+
159
  meter = pyln.Meter(44100)
160
 
161
 
 
225
  lr,
226
  progress=gr.Progress(track_tqdm=True),
227
  ):
228
+ # close all figures to avoid too many open figures
229
+ plt.close("all")
230
+
231
  device = Path("DEVICE.txt").read_text()
232
  if method == "Mean":
233
  return gaussian_params_dict[dataset][0].to(device)
 
478
  def vec2fx(x):
479
  fx = deepcopy(global_fx)
480
  fx.load_state_dict(vec2dict(x), strict=False)
481
+ # fx.apply(partial(clip_delay_eq_Q, Q=0.707))
482
  return fx
483
 
484
 
 
606
  interactive=True,
607
  )
608
 
609
+ _ = gr.Markdown("## Effect Parameters Visualisation")
610
+ with gr.Row():
611
+ peq_plot = gr.Plot(
612
+ plot_eq(global_fx), label="PEQ Frequency Response", elem_id="peq-plot"
613
+ )
614
+ comp_plot = gr.Plot(
615
+ plot_comp(global_fx), label="Compressor Curve", elem_id="comp-plot"
616
+ )
617
+
618
+ with gr.Row():
619
+ delay_plot = gr.Plot(
620
+ plot_delay(global_fx),
621
+ label="Delay Frequency Response",
622
+ elem_id="delay-plot",
623
+ )
624
+ reverb_plot = gr.Plot(
625
+ plot_reverb(global_fx),
626
+ label="Tone Correction PEQ",
627
+ elem_id="reverb-plot",
628
+ min_width=160,
629
+ )
630
+ t60_plot = gr.Plot(
631
+ plot_t60(global_fx), label="Decay Time", elem_id="t60-plot", min_width=160
632
+ )
633
+
634
+ _ = gr.Markdown("## Effect Settings JSON")
635
+ with gr.Row():
636
+ json_output = gr.JSON(
637
+ model2json(global_fx), label="Effect Settings", max_height=800, open=True
638
+ )
639
+
640
  process_button.click(
641
  chain_functions(
 
 
 
 
 
642
  lambda audio, approx, ratio, *args: (
643
  audio,
644
  approx,
645
  ratio,
646
  inference(audio, *args),
647
  ),
648
+ lambda audio, approx, ratio, vec: (
649
+ vec2fx(vec),
650
+ *render(audio, approx, ratio, vec),
651
+ vec,
652
+ ),
653
+ lambda fx, *args: (
654
+ *args,
655
+ *map(
656
+ lambda f: f(fx),
657
+ [plot_eq, plot_comp, plot_delay, plot_reverb, plot_t60, model2json],
658
+ ),
659
+ ),
660
  ),
661
  inputs=[
662
  audio_input,
 
678
  direct_output,
679
  wet_output,
680
  fx_params,
681
+ peq_plot,
682
+ comp_plot,
683
+ delay_plot,
684
+ reverb_plot,
685
+ t60_plot,
686
+ json_output,
687
  ],
688
  )
689