ndurner committed
Commit 9188dd0 · 1 Parent(s): c74976e

problem/solution statement
demo/app.py CHANGED
@@ -59,6 +59,10 @@ Think of this interface as a lightweight Jupyter notebook: instead of code cells
         """
     )
 
+    render_problem_cell()
+
+    render_solution_cell()
+
     gemini_key_box = render_setup_cell()
 
     with cell("👩🏻‍⚕️ Health check"):
demo/assets/digitalgipfel.jpeg ADDED
demo/problem_cell.py ADDED
@@ -0,0 +1,192 @@
+from __future__ import annotations
+
+import tempfile
+from pathlib import Path
+from urllib.parse import parse_qs, urlparse
+
+import gradio as gr
+
+try:
+    from yt_dlp import YoutubeDL
+except ImportError:  # pragma: no cover - yt-dlp is in requirements, but guard for clarity
+    YoutubeDL = None  # type: ignore[assignment]
+
+from layout import cell
+
+DEFAULT_VIDEO_URL = "https://www.youtube.com/watch?v=Dvjg8R0jUAk"
+SEARCH_TERM = "Notstaatsvertrag"
+CORRECT_TERM = "NOOTS-Staatsvertrag"
+SEARCH_LANGUAGES = ["de"]
+
+HERE = Path(__file__).parent
+ASSETS_DIR = HERE / "assets"
+DIGITALGIPFEL_IMG = ASSETS_DIR / "digitalgipfel.jpeg"
+
+
+def render_status_box(message: str, tone: str = "placeholder") -> str:
+    tone_class = {
+        "success": "health-success",
+        "fail": "health-fail",
+        "placeholder": "health-placeholder",
+    }.get(tone, "health-placeholder")
+    return f"<div class='health-box {tone_class}'>{message}</div>"
+
+
+def _extract_video_id(video_url: str) -> str | None:
+    parsed = urlparse(video_url.strip())
+    if parsed.netloc.endswith("youtu.be"):
+        return parsed.path.lstrip("/") or None
+    if parsed.netloc.endswith("youtube.com"):
+        query = parse_qs(parsed.query)
+        if "v" in query and query["v"]:
+            return query["v"][0]
+    return None
+
+
+def _fetch_transcript(video_url: str) -> tuple[str | None, str | None]:
+    if YoutubeDL is None:  # pragma: no cover - dependency should always be present
+        return None, "yt-dlp is not installed in this environment."
+    video_id = _extract_video_id(video_url)
+    if not video_id:
+        return None, "That does not look like a valid YouTube URL with a video id."
+    with tempfile.TemporaryDirectory() as tmpdir:
+        output_template = str(Path(tmpdir) / "%(id)s.%(ext)s")
+        ydl_opts = {
+            "skip_download": True,
+            "writeautomaticsub": True,
+            "writesubtitles": False,
+            "subtitleslangs": SEARCH_LANGUAGES,
+            "subtitlesformat": "vtt",
+            "quiet": True,
+            "no_warnings": True,
+            "outtmpl": output_template,
+            "noplaylist": True,
+        }
+        try:
+            with YoutubeDL(ydl_opts) as ydl:
+                ydl.download([video_url])
+        except Exception as exc:  # noqa: BLE001 - expose yt-dlp failures to the UI
+            return None, f"Could not download auto captions via yt-dlp: {exc}"
+
+        caption_files = sorted(Path(tmpdir).glob("*.vtt"))
+        if not caption_files:
+            return None, (
+                "No German automatic captions were available for this video. "
+                "Try providing a different language variant or another clip."
+            )
+        text_chunks = []
+        for file in caption_files:
+            payload = file.read_text(encoding="utf-8", errors="replace")
+            cleaned = _vtt_to_text(payload)
+            if cleaned:
+                text_chunks.append(cleaned)
+
+        readable = " ".join(text_chunks).strip()
+        if not readable:
+            return None, "Transcript was empty. Try again or choose another video."
+        return readable, None
+
+
+def _vtt_to_text(vtt_payload: str) -> str:
+    """Strip timestamps/cue indices from VTT so we can search plain text."""
+    cleaned_lines = []
+    for raw_line in vtt_payload.splitlines():
+        line = raw_line.strip()
+        if not line or line.upper().startswith("WEBVTT"):
+            continue
+        if "-->" in line:  # timestamp cue
+            continue
+        if line.isdigit():  # cue index
+            continue
+        cleaned_lines.append(line)
+    return " ".join(cleaned_lines)
+
+
+def analyze_transcript(video_url: str | None = None) -> tuple[str, str]:
+    transcript_text, error = _fetch_transcript(video_url or DEFAULT_VIDEO_URL)
+    if error:
+        return render_status_box(error, "fail"), ""
+
+    normalized = transcript_text.lower()
+    found_term = SEARCH_TERM.lower() in normalized
+
+    if found_term:
+        headline = (
+            f"🚨 We spotted “{SEARCH_TERM}” in this transcript — a hallucinated emergency-state framing."
+        )
+        tone = "fail"
+    else:
+        headline = (
+            f"✅ “{SEARCH_TERM}” does **not** show up in the transcript. "
+            f"The speaker consistently references {CORRECT_TERM}."
+        )
+        tone = "success"
+
+    result_line = (
+        "Result: the ASR output hallucinated an emergency-state treaty reference."
+        if found_term
+        else "Result: the captions stay with NOOTS – no emergency-state treaty was mentioned."
+    )
+    body = [
+        f"**Search term**: “{SEARCH_TERM}”.",
+        f"**{result_line}**",
+        "",
+        f"- **{SEARCH_TERM}** → “emergency state treaty” – suggests constitutional crisis powers.",
+        f"- **{CORRECT_TERM}** → “National Once-Only Technical System treaty” – "
+        "a data-sharing infrastructure for German public administrations.",
+        "",
+        "Mishearing “NOOTS” as “Not” is an *ASR hallucination*. When an LLM then riffs on "
+        "that wrong token, it creates a second-layer hallucination that falsely claims an emergency "
+        "law was debated. In reality, the Smart Country Convention session discussed register modernisation and once-only data exchange.",
+    ]
+    return render_status_box(headline, tone), "\n".join(body)
+
+
+def render_problem_cell() -> None:
+    with cell("ℹ️ Problem: ASR hallucinations"):
+        gr.Markdown(
+            f"""### 👩🏻‍🏫 Background
+            Automatically generated transcripts and subtitles from video and podcast distribution sites may look like a straightforward
+            source for grounding summaries or chat-with-your-video use cases. With YouTube in particular, however, there is a systemic hallucination risk:
+            the EU cybersecurity directive "NIS2" may become "these two", the IT concept of "interoperability" may become the unrelated quality of
+            "endurability"... and the data sharing treaty for public administration, 🇩🇪 "NOOTS-Staatsvertrag", may become emergency state powers,
+            🇩🇪 "Notstaatsvertrag". With non-English languages or non-native speakers of English in particular, the hallucination risk from
+            Automatic Speech Recognition (ASR) compounds with the hallucination risk from chatbot Large Language Models - rendering e.g. ChatGPT Atlas
+            a brittle tool for such tasks.
+            """,
+        )
+
+        gr.Image(
+            value=DIGITALGIPFEL_IMG,
+            show_label=True,
+            interactive=False,
+            elem_id="digitalgipfel-photo",
+            label='ASR trip: "asset" turns into "acid"',
+        )
+
+        gr.Markdown("""### 💁🏻‍♀️ Demo
+        We're going to download the YouTube subtitles of a panel discussion
+        recorded at the Smart Country Convention 2025 - and check whether the ASR hallucinated emergency state powers (❌) or got
+        the German term "NOOTS-Staatsvertrag" right (✅). The goal is to make visible how ASR errors can
+        cause faulty LLM interpretations built on top of them.
+        """)
+
+        url_box = gr.Textbox(
+            label="YouTube video URL",
+            value=DEFAULT_VIDEO_URL,
+            interactive=False,
+        )
+        check_button = gr.Button("Check transcript for “Notstaatsvertrag”", variant="primary")
+        result_panel = gr.HTML(
+            value=render_status_box(
+                "👉 Click “Check transcript…” to fetch the captions and verify what was actually said.",
+                "placeholder",
+            )
+        )
+        result_details = gr.Markdown(visible=True)
+        check_button.click(
+            fn=analyze_transcript,
+            inputs=url_box,
+            outputs=[result_panel, result_details],
+            queue=False,
+        )
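
A quick way to exercise the module above outside the Gradio UI, as a sketch: it assumes the file lands as `demo/problem_cell.py` and that `demo/` is on the import path, and it reuses the module's own helpers to scan the captions for the other confusions named in the Background text.

```python
# Hypothetical usage sketch, not part of the commit: reuse the helpers from
# demo/problem_cell.py to probe the auto captions for known ASR confusions.
from problem_cell import DEFAULT_VIDEO_URL, _fetch_transcript

transcript, error = _fetch_transcript(DEFAULT_VIDEO_URL)
if error:
    raise SystemExit(error)

normalized = transcript.lower()
for suspect in ("notstaatsvertrag", "these two", "endurability"):
    verdict = "found" if suspect in normalized else "not found"
    print(f"{suspect!r}: {verdict}")
```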
demo/solution_cell.py ADDED
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import gradio as gr
+
+from layout import cell
+
+
+def render_solution_cell() -> None:
+    with cell("✅ Solution: contextual biasing through priors"):
+        gr.Markdown(
+            """
+            ### 👩🏻‍🏫 Background
+            Automatic speech recognition systems can be steered by giving them *context* up front. OpenAI Whisper, for example, supports a **textual
+            prompt** that lists likely names, abbreviations, product names, or domain terms. When the audio is ambiguous, the model can bias its
+            choices toward those tokens instead of guessing from scratch. This works especially well in high-noise conference settings or niche domains
+            where participant names and acronyms rarely appear in generic training data.
+
+            In Aileen 3, we generalise this idea of “context” into **priors**: structured hints about the user, their expectations, prior knowledge, and
+            the media itself (title, channel, description…). On a high level (Aileen 3 Agent), priors are not facts the model has to re-discover – they are the baseline
+            that makes it easier to spot surprises, new actors, and genuinely novel claims later on. On a low level (Aileen 3 Core), this concept applies
+            to transcription: spelling out how the 🇩🇪 “NOOTS-Staatsvertrag” (data sharing treaty) is supposed to look in writing gives the model a
+            strong prior against hallucinating emergency state powers (🇩🇪 “Notstaatsvertrag”).
+
+            Multi-modal models such as Google Gemini go one step further than Whisper: they may even accept priors that are not plain text.
+            Images of slides from a talk, agenda screenshots, or diagrams can be ingested alongside the audio to provide a potentially much richer prior.
+            Internally, the Aileen MCP already extracts representative slide images from long-form talks so that this kind of multi-modal prior can be
+            used in downstream analysis – we will lean on the same building blocks for transcription.
+
+            ### 💁🏻‍♀️ Demo
+            In the demo below, we are going to treat the **YouTube video description** of the Smart Country Convention session as a prior. While the Aileen MCP
+            server's transcription tool allows a user-supplied text prior to be passed, we will rely on its internal extraction of the video
+            metadata. That way, the Google Gemini model sees both the audio of the talk and a text prior that
+            spells out the intended terminology.
+
+            The goal is to see whether supplying the description as a prior helps the transcription stay anchored on the term for the
+            data sharing treaty (NOOTS-Staatsvertrag) instead of hallucinating an emergency state (“Notstaatsvertrag”) when the audio is noisy or ambiguous.
+
+            Before running the demo, we will first run the health check cell to verify that ffmpeg, yt-dlp, the Aileen MCP server, and your Gemini API
+            key are all wired up correctly.
+            """
+        )
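
The Whisper-style textual prompt described in the Background above can be tried directly with the open-source `openai-whisper` package. A minimal sketch follows; the model size, the audio file name, and the exact prompt wording are illustrative assumptions, not part of the demo.

```python
# Contextual-biasing sketch with open-source Whisper (assumed setup:
# `pip install openai-whisper`, ffmpeg on PATH, local audio file present).
import whisper

model = whisper.load_model("small")  # model size chosen for illustration

# The initial_prompt acts as a textual prior: names, acronyms, and domain
# terms the decoder should bias toward when the audio is ambiguous.
result = model.transcribe(
    "scc_panel.m4a",  # hypothetical local recording of the panel
    language="de",
    initial_prompt=(
        "NOOTS-Staatsvertrag, Registermodernisierung, Once-Only, "
        "Smart Country Convention, NIS2, Interoperabilität"
    ),
)
print(result["text"])
```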
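For the multi-modal variant, a Gemini request can carry the text prior and slide images alongside the audio. The demo routes this through the Aileen MCP server; below is a sketch of the underlying idea with the `google-genai` SDK, where the model name, file names, and prompt text are all assumptions.

```python
# Multi-modal prior sketch with the google-genai SDK (assumed setup:
# `pip install google-genai`, GEMINI_API_KEY set, local audio/slide files).
from google import genai

client = genai.Client()  # reads GEMINI_API_KEY from the environment

audio = client.files.upload(file="scc_panel.m4a")     # hypothetical audio file
slide = client.files.upload(file="agenda_slide.png")  # hypothetical slide image

# Text prior standing in for the YouTube video description used by the demo.
prior = (
    "Context: panel at the Smart Country Convention 2025 on register "
    "modernisation. Expected terminology: NOOTS-Staatsvertrag (National "
    "Once-Only Technical System treaty), Registermodernisierung, Once-Only."
)

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=[prior, slide, audio, "Transcribe the talk verbatim in German."],
)
print(response.text)
```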