fpessanha commited on
Commit
83e5388
·
1 Parent(s): 626626c

Feat: Prototype version of waveform region

Browse files
Files changed (1) hide show
  1. app.py +47 -5
app.py CHANGED
@@ -60,11 +60,51 @@ js_progress_bar = """
60
 
61
  var elem = document.getElementById("myBar");
62
  elem.style.width = n_ann/total_ann * 100 + "%";
63
- progressText.innerText = 'Completed: ' + n_ann + ' / ' + total_ann
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  }
65
  """
66
 
67
 
 
 
68
  intro_html = """
69
  <h1>Emotionality in Speech</h1>
70
 
@@ -116,6 +156,7 @@ intro_html = """
116
  </div>
117
  """
118
 
 
119
  # List of all audio files to annotate
120
  file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))
121
  total_annotations = len(file_list)
@@ -223,7 +264,8 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
223
  <div id="myBar">
224
  <span id="progressText">Press "Let's go!" to start</span>
225
  </div>
226
- </div>""", padding = False)
 
227
 
228
  # Row with audio player
229
  with gr.Row():
@@ -268,9 +310,9 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
268
 
269
 
270
  lets_go.click(None, [], [ann_completed, total], js = js_progress_bar)
271
- lets_go.click(deactivate_participant_id, [participant_id, lets_go], [participant_id, lets_go])
272
- lets_go.click(activate_elements, [emotions, confidence, comments, next_button, previous_button], [emotions, confidence, comments, next_button, previous_button])
273
- lets_go.click(load_example, inputs = [gr.Number(current_index["index"], visible = False)], outputs = [sentence_text, audio_player, emotions, confidence, ann_completed, comments])
274
 
275
 
276
  demo.launch()
 
60
 
61
  var elem = document.getElementById("myBar");
62
  elem.style.width = n_ann/total_ann * 100 + "%";
63
+ progressText.innerText = 'Completed: ' + n_ann + ' / ' + total_ann;
64
+
65
+ const waveform = document.querySelector('#waveform div');
66
+ const shadowRoot = waveform.shadowRoot;
67
+ const canvases = shadowRoot.querySelector('.wrapper');
68
+
69
+ console.log(canvases.offsetWidth)
70
+
71
+ const leftOffsetPct = 0.3;
72
+ const widthPct = 0.3;
73
+
74
+
75
+ // Create a style element for the shadow DOM
76
+ const style = document.createElement('style');
77
+ style.textContent = `
78
+ .wrapper::after {
79
+ content: '';
80
+ position: absolute;
81
+ top: 0;
82
+ left: ${canvases.offsetWidth * leftOffsetPct}px;
83
+ width: ${canvases.offsetWidth * widthPct}px;
84
+ height: 100%;
85
+ background-color: blue;
86
+ z-index: 999;
87
+ opacity: 0.5;
88
+ }
89
+
90
+ /* Ensure parent has positioning context */
91
+ .wrapper {
92
+ position: relative;
93
+ }
94
+ `;
95
+
96
+ // Append the style to the shadow root
97
+ shadowRoot.appendChild(style);
98
+
99
+ console.log('Added pseudo-element to canvases');
100
+
101
+
102
  }
103
  """
104
 
105
 
106
+
107
+
108
  intro_html = """
109
  <h1>Emotionality in Speech</h1>
110
 
 
156
  </div>
157
  """
158
 
159
+
160
  # List of all audio files to annotate
161
  file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))
162
  total_annotations = len(file_list)
 
264
  <div id="myBar">
265
  <span id="progressText">Press "Let's go!" to start</span>
266
  </div>
267
+ </div>
268
+ """, padding = False)
269
 
270
  # Row with audio player
271
  with gr.Row():
 
310
 
311
 
312
  lets_go.click(None, [], [ann_completed, total], js = js_progress_bar)
313
+ # lets_go.click(deactivate_participant_id, [participant_id, lets_go], [participant_id, lets_go])
314
+ # lets_go.click(activate_elements, [emotions, confidence, comments, next_button, previous_button], [emotions, confidence, comments, next_button, previous_button])
315
+ # lets_go.click(load_example, inputs = [gr.Number(current_index["index"], visible = False)], outputs = [sentence_text, audio_player, emotions, confidence, ann_completed, comments])
316
 
317
 
318
  demo.launch()