diff --git "a/2025/VALLR_ Visual ASR Language Model for Lip Reading/layout.json" "b/2025/VALLR_ Visual ASR Language Model for Lip Reading/layout.json" new file mode 100644--- /dev/null +++ "b/2025/VALLR_ Visual ASR Language Model for Lip Reading/layout.json" @@ -0,0 +1,9517 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 136, + 103, + 475, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 103, + 475, + 121 + ], + "spans": [ + { + "bbox": [ + 136, + 103, + 475, + 121 + ], + "type": "text", + "content": "VALLR: Visual ASR Language Model for Lip Reading" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 176, + 144, + 434, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 144, + 434, + 172 + ], + "spans": [ + { + "bbox": [ + 176, + 144, + 434, + 172 + ], + "type": "text", + "content": "Marshall Thomas Edward Fish Richard Bowden University of Surrey" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 174, + 493, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 174, + 493, + 185 + ], + "spans": [ + { + "bbox": [ + 111, + 174, + 493, + 185 + ], + "type": "text", + "content": "mt00893@surrey.ac.uk edward.fish@surrey.ac.uk r.bowden@surrey.ac.uk" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 152, + 203, + 200, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 203, + 200, + 216 + ], + "spans": [ + { + "bbox": [ + 152, + 203, + 200, + 216 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 236, + 297, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 236, + 297, + 606 + ], + "spans": [ + { + "bbox": [ + 53, + 236, + 297, + 606 + ], + "type": "text", + "content": "Lip Reading, or Visual Automatic Speech Recognition (V-ASR), is a complex task requiring the interpretation of spoken language exclusively from visual cues, primarily lip 
movements and facial expressions. This task is especially challenging due to the absence of auditory information and the inherent ambiguity when visually distinguishing phonemes that have overlapping visemes, where different phonemes appear identical on the lips. Current methods typically attempt to predict words or characters directly from these visual cues, but this approach frequently encounters high error rates due to coarticulation effects and viseme ambiguity. We propose a novel two-stage, phoneme-centric framework for Visual Automatic Speech Recognition (V-ASR) that addresses these longstanding challenges. First, our model predicts a compact sequence of phonemes from visual inputs using a Video Transformer with a CTC head, thereby reducing the task complexity and achieving robust speaker invariance. This phoneme output then serves as the input to a fine-tuned Large Language Model (LLM), which reconstructs coherent words and sentences by leveraging broader linguistic context. Unlike existing methods that either predict words directly or rely on large-scale multimodal pre-training, our approach explicitly encodes intermediate linguistic structure while remaining highly data efficient. We demonstrate state-of-the-art performance on two challenging datasets, LRS2 and LRS3, where our method achieves significant reductions in Word Error Rate (WER) achieving a SOTA WER of 18.7 on LRS3 despite using " + }, + { + "bbox": [ + 53, + 236, + 297, + 606 + ], + "type": "inline_equation", + "content": "99.4\\%" + }, + { + "bbox": [ + 53, + 236, + 297, + 606 + ], + "type": "text", + "content": " less labelled video data than the next best approach. Code is available here: https://gitlab.surrey.ac.uk/" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 634, + 135, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 634, + 135, + 645 + ], + "spans": [ + { + "bbox": [ + 56, + 634, + 135, + 645 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 653, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 714 + ], + "type": "text", + "content": "Lip Reading, or Visual Automatic Speech Recognition (V-ASR), involves interpreting spoken language from visual cues such as lip movements and facial expressions. As a natural skill, humans use it to supplement auditory information, and as a technology, it has profound potential for en" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 319, + 205, + 559, + 365 + ], + "blocks": [ + { + "bbox": [ + 319, + 205, + 559, + 365 + ], + "lines": [ + { + "bbox": [ + 319, + 205, + 559, + 365 + ], + "spans": [ + { + "bbox": [ + 319, + 205, + 559, + 365 + ], + "type": "image", + "image_path": "9ae34c4388a4f19abe6b22865a145bffff65591cbe287bfbeb007bdb92f19060.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 376, + 555, + 467 + ], + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 467 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 467 + ], + "type": "text", + "content": "Figure 1. Comparison between different models' performances in WER for Visual Automatic Speech Recognition on the LRS3 dataset [3] when compared with the amount of labelled training data. Circle size (green) denotes scale of pre-training data, while fine-tuned models (grey) are not pre-trained. Our model (orange) outperforms all existing approaches with just 30 hours of video training data, no self-supervised pre-training, and without the requirement for additional labeled visual data during fine-tuning." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 498, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 556, + 715 + ], + "type": "text", + "content": "hancing accessibility, particularly for the Deaf and hard-of-hearing communities, as well as for applications in noisy or privacy-sensitive environments [1]. Despite substantial advancements in ASR, the visual-only interpretation of speech remains an unresolved challenge due to inherent uncertainties in lip movements and their complex temporal dynamics. A central difficulty in V-ASR stems from the inherent ambiguity of visemes, the visual equivalents of phonemes, which often appear nearly identical for different sounds (e.g., 'p' and 'b') [1, 7]. Further complicating this task are coarticulation effects [33], where adjacent sounds blur one another's articulation, making phoneme boundaries visually unclear. 
Although some recent methods have focussed on robust intermediary representations to deal with these cases, such as leveraging subword-level predictions [41], or quantized latent representations [13], these approaches can still fail to capture broader contextual cues, limiting their ability to resolve visually similar phonemes that might otherwise be" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "spans": [ + { + "bbox": [ + 65, + 2, + 111, + 34 + ], + "type": "text", + "content": "CVF" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "spans": [ + { + "bbox": [ + 146, + 0, + 494, + 37 + ], + "type": "text", + "content": "This ICCV paper is the Open Access version, provided by the Computer Vision Foundation. Except for this watermark, it is identical to the accepted version; the final published version of the proceedings is available on IEEE Xplore." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 316, + 757 + ], + "type": "text", + "content": "2846" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 68, + 555, + 264 + ], + "blocks": [ + { + "bbox": [ + 55, + 68, + 555, + 264 + ], + "lines": [ + { + "bbox": [ + 55, + 68, + 555, + 264 + ], + "spans": [ + { + "bbox": [ + 55, + 68, + 555, + 264 + ], + "type": "image", + "image_path": "85b1f3f5dae7dfb135fece6cf0a7069536516d7e82e8e5024ee5d2691a090d08.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 279, + 555, + 346 + ], + "lines": [ + { + "bbox": [ + 54, + 279, + 555, + 346 + ], + "spans": [ + { + "bbox": [ + 54, + 279, + 555, + 346 + ], + "type": "text", + "content": "Figure 2. An overview of our approach. First we extract facial regions from 16 frames of input video. We apply random pixel masking at " + }, + { + "bbox": [ + 54, + 279, + 555, + 346 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 54, + 279, + 555, + 346 + ], + "type": "text", + "content": " probability, add positional embedding, and encode visual features via a vision transformer encoder (ViT-base [14]). We implement temporal downsampling via 1D convolution and then use a CTC linear head to predict sequences of phonemes. During training, we also fine-tune an LLM to reconstruct sentences from phonemes with a text-only dataset [52]. During inference, the phonemes from the CTC head are processed via the LLM to reconstruct the predicted text. This can be performed end-to-end or in two stages, depending on available resources." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 367, + 255, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 367, + 255, + 379 + ], + "spans": [ + { + "bbox": [ + 55, + 367, + 255, + 379 + ], + "type": "text", + "content": "distinguishable through sentence-level semantics." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 391, + 295, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 391, + 295, + 547 + ], + "spans": [ + { + "bbox": [ + 54, + 391, + 295, + 547 + ], + "type": "text", + "content": "One approach to this challenge is to leverage large language models (LLM's) and attempt to map raw lip movements directly to sentences, thus bypassing the issue of viseme ambiguity [58, 59]. However, bridging the gap between high-dimensional, unstructured visual inputs and detailed textual representations is resource intensive and typically requires huge datasets, or specialized architectures. Moreover, these purely end-to-end pipelines often lack an interpretable intermediary step, making it unclear how the model is handling viseme ambiguity at a finer linguistic granularity. This can lead to hallucination effects, which become more critical in lip reading technologies designed for accessibility, or security applications." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 557, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 557, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 557, + 295, + 715 + ], + "type": "text", + "content": "Our two-stage, phoneme-centric method addresses both issues. In the first stage, we map short windows of video frames to a discrete, interpretable phoneme sequence. This intermediate representation is significantly easier to learn and more robust against speaker or environmental variations, as phonemes abstract away speaker-specific attributes. 
Subsequently, in the second stage, we fine-tune an LLM for the task of reconstructing sentences from phonemes. By segmenting the problem, we combine the interpretability and efficiency of phoneme-based prediction with the sophisticated contextual reasoning of modern LLMs. This phoneme " + }, + { + "bbox": [ + 54, + 557, + 295, + 715 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 54, + 557, + 295, + 715 + ], + "type": "text", + "content": " sentence pipeline also aligns with psycholinguistic models of speech perception, where" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 367, + 555, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 367, + 555, + 391 + ], + "spans": [ + { + "bbox": [ + 313, + 367, + 555, + 391 + ], + "type": "text", + "content": "phoneme-level processing naturally precedes lexical access [31, 32]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 397, + 495, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 397, + 495, + 409 + ], + "spans": [ + { + "bbox": [ + 313, + 397, + 495, + 409 + ], + "type": "text", + "content": "Several advantages emerge from this design:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 415, + 555, + 617 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 309, + 415, + 555, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 415, + 555, + 461 + ], + "spans": [ + { + "bbox": [ + 309, + 415, + 555, + 461 + ], + "type": "text", + "content": "(i) Compact Target Space: Predicting only 38 phoneme classes avoids learning entire word-level vocabularies, making the model more data-efficient and simpler to train." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 463, + 554, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 463, + 554, + 521 + ], + "spans": [ + { + "bbox": [ + 307, + 463, + 554, + 521 + ], + "type": "text", + "content": "(ii) LLM-Enhanced Accuracy: A large language model, pre-trained for phoneme " + }, + { + "bbox": [ + 307, + 463, + 554, + 521 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 307, + 463, + 554, + 521 + ], + "type": "text", + "content": " sentence reconstruction, removes the requirement of direct word-level predictions from RGB data, reducing the need for complex multimodal training." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 522, + 553, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 522, + 553, + 559 + ], + "spans": [ + { + "bbox": [ + 304, + 522, + 553, + 559 + ], + "type": "text", + "content": "(iii) Data Availability: Exploiting easily available phoneme " + }, + { + "bbox": [ + 304, + 522, + 553, + 559 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 304, + 522, + 553, + 559 + ], + "type": "text", + "content": " sentence text pairs eliminates reliance on extensive lip-reading video data for pre-training." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 559, + 553, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 559, + 553, + 617 + ], + "spans": [ + { + "bbox": [ + 304, + 559, + 553, + 617 + ], + "type": "text", + "content": "(iv) Interpretability & Error Analysis: The two-stage design yields an explicit phoneme-level output, constraining recognition errors to local phoneme segments, which the LLM subsequently repairs at the word level reducing errors at a sentence level." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 628, + 400, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 628, + 400, + 639 + ], + "spans": [ + { + "bbox": [ + 313, + 628, + 400, + 639 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 647, + 555, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 555, + 684 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 555, + 684 + ], + "type": "text", + "content": "Here, we review key categories of approaches and highlight their advantages and limitations in the context of our proposed phoneme-centric, language-aware approach." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": "Lip Reading: Early methods predominantly relied on statistical models such as Hidden Markov Models (HMMs)" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2847" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 336 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 336 + ], + "type": "text", + "content": "to aggregate temporal sequences of lip movements alongside hand-crafted feature extractors [1, 9, 35, 36, 40, 62]. 
With the advent of deep learning, coupled with the availability of large-scale datasets such as Grid [12], LRW [11], LRS2 [47], and LRS3 [3], significant progress has been made in recent years. Initially, Convolutional Neural Networks (CNNs) were applied to the task of word recognition [11], with additional temporal processing via RNN's [30, 54, 55]. With greater compute came the possibility to interpret full sentences with approaches applying Automatic Speech Recognition (ASR) methodologies to the Visual Automatic Speech Recognition (V-ASR) task. These included sequence-to-sequence models [2], CTC approaches [5, 46], and hybrid models [37, 38]. These models improved recognition accuracy by learning hierarchical representations of visual features and modelling temporal dependencies. The application of transformer models [14, 21, 44, 50, 53] in V-ASR brought further performance gains in combination with extensive self-supervised pretraining. Methods such as AV-HuBERT [45] and LiteVSR [24] leveraged self-supervised learning to learn robust representations from large amounts of unlabelled video data." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 345, + 296, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 345, + 296, + 633 + ], + "spans": [ + { + "bbox": [ + 55, + 345, + 296, + 633 + ], + "type": "text", + "content": "AV-ASR vs V-ASR: Lip reading approaches can be separated into Visual Automatic Speech Recognition (V-ASR) and Audio-Visual Automatic Speech Recognition (AV-ASR). In the AV-ASR task, labelled audio data is available at training time which can be used to guide the visual encoder in various ways. These can include generating synthetic additional pre-training data [25, 27], or simply fusing audio-visual input features [44-46, 57]. Other methods have used knowledge distillation to inductively transfer knowledge from pre-trained ASR models to visual encoders [4, 28]. 
However, these methods depend on audio data or pre-trained ASR models, which may not be available or reliable in scenarios where lip reading is most needed, such as noisy environments. V-ASR, on the other hand, is a more challenging task that relies only on visual data in the training stage [1, 56, 60, 62]. Recent methods have also shown the effectiveness of adapting visual inputs to pre-trained ASR models [13, 24, 42] without the need for audio data during training. This method is particularly powerful since the ASR model already has the ability to reconstruct latent representations to words from the pre-training task, however it relies on large amounts of additional data to learn robust visual representations and map them to the audio latent space [42]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 641, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 296, + 714 + ], + "type": "text", + "content": "Lip Reading with LLMs: Recent approaches have explored visual attention mechanisms for sub-word units [15, 39, 41, 48] to capture fine-grained linguistic features in lip reading. However, reconstructing these units into coherent sentences remains challenging for networks lacking robust contextual and semantic reasoning capabilities. Early" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "type": "text", + "content": "phoneme-based methods [16, 49] similarly struggled with effective decoding to word-level representations and these methods were outpaced in performance by end-to-end approaches [5] which could leverage extensive data. As such they have not been fully explored in the context of modern architectures and techniques." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 150, + 555, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 150, + 555, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 150, + 555, + 258 + ], + "type": "text", + "content": "For example, recent approaches which have integrated Large Language Models (LLMs) [43, 51] into lip-reading pipelines [58, 59] have started from mapping visual features directly to LLM text embeddings. However, by omitting explicit intermediate representations, these approaches inherit phonetic ambiguities that propagate through the network. This manifests as increased word error rates and reduced robustness, thus requiring substantially more training data to achieve generalization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "spans": [ + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "type": "text", + "content": "Our work re-examines phonemes as an interpretable discrete representation bridging visual and linguistic domains. Rather than forcing LLMs to perform visual " + }, + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "type": "text", + "content": " sentence translation requiring expensive visual-text alignment – we constrain the LLM's role to phoneme " + }, + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 265, + 556, + 421 + ], + "type": "text", + "content": " sentence reconstruction. 
This decomposition offers three key advantages: (1) Phoneme-to-text translation is a well-constrained task requiring only lightweight LLM fine-tuning using abundant textual corpora; (2) Speaker-independent phoneme representations eliminate the need for visual adaptation in the language model; (3) The modular architecture prevents error propagation between visual and linguistic domains while maintaining interpretability." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 431, + 370, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 431, + 370, + 443 + ], + "spans": [ + { + "bbox": [ + 314, + 431, + 370, + 443 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 451, + 554, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 451, + 554, + 487 + ], + "spans": [ + { + "bbox": [ + 313, + 451, + 554, + 487 + ], + "type": "text", + "content": "In this section, we present our two-stage phoneme-centric approach to visual-only lip reading. Figure 2 provides an overview of the framework. Our goal is to learn a function" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 347, + 497, + 554, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 497, + 554, + 540 + ], + "spans": [ + { + "bbox": [ + 347, + 497, + 554, + 540 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} f: \\quad \\mathbf {X} = \\left\\{x _ {1}, x _ {2}, \\dots , x _ {T} \\right\\} \\\\ \\longmapsto \\mathbf {P h} = \\left\\{p h _ {1}, p h _ {2}, \\dots , p h _ {m} \\right\\} \\tag {1} \\\\ \\longmapsto \\mathbf {S} = \\left\\{s _ {1}, s _ {2}, \\dots , s _ {M} \\right\\}. 
\\\\ \\end{array}", + "image_path": "1c4e0152e61f3485d387c2cc5bcb8e2f4a1c92ddbb74501827a936af39131e6b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{T \\times H \\times W \\times 3}" + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": " is a video sequence of " + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": " frames (with height " + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": " and width " + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": "), " + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{Ph}" + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": " denotes the phoneme sequence, and " + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "inline_equation", + "content": "\\mathbf{S}" + }, + { + "bbox": [ + 313, + 549, + 554, + 598 + ], + "type": "text", + "content": " is the reconstructed sentence. 
Specifically, we decompose the task into:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 605, + 554, + 664 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 317, + 605, + 554, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 605, + 554, + 628 + ], + "spans": [ + { + "bbox": [ + 317, + 605, + 554, + 628 + ], + "type": "text", + "content": "1. Video " + }, + { + "bbox": [ + 317, + 605, + 554, + 628 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 317, + 605, + 554, + 628 + ], + "type": "text", + "content": " Phoneme: Map the sequence of video frames to phonemes using a Vision Transformer and CTC head." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 629, + 554, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 629, + 554, + 664 + ], + "spans": [ + { + "bbox": [ + 317, + 629, + 554, + 664 + ], + "type": "text", + "content": "2. Phoneme " + }, + { + "bbox": [ + 317, + 629, + 554, + 664 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 317, + 629, + 554, + 664 + ], + "type": "text", + "content": " Sentence: Convert phonemes to a coherent word sequence with a fine-tuned Large Language Model (LLM)." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 671, + 511, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 671, + 511, + 684 + ], + "spans": [ + { + "bbox": [ + 313, + 671, + 511, + 684 + ], + "type": "text", + "content": "The model comprises four primary components:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 689, + 554, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 712 + ], + "type": "text", + "content": "- Visual Feature Extractor: Captures relevant features from each frame via a Vision Transformer." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2848" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 144 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 95 + ], + "type": "text", + "content": "- Adapter Network with Temporal Downsampling: Reduces sequence length to accommodate CTC alignment." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 96, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 96, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 96, + 294, + 120 + ], + "type": "text", + "content": "- CTC Head: Predicts phoneme sequences without requiring explicit temporal labels." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 121, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 294, + 144 + ], + "type": "text", + "content": "- LLM for Phoneme-to-Sentence Reconstruction: Translates predicted phonemes into final word sequences." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 152, + 188, + 165 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 152, + 188, + 165 + ], + "spans": [ + { + "bbox": [ + 55, + 152, + 188, + 165 + ], + "type": "text", + "content": "3.1. Pre-Processing Pipeline" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "spans": [ + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "type": "text", + "content": "Each frame " + }, + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "type": "text", + "content": " is first passed through a face-detection model [26] to identify and crop the speaker's face region, centered on the speaker's lips. The detected region is then cropped, resized to " + }, + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 55, + 170, + 294, + 229 + ], + "type": "text", + "content": ", and normalized for batch processing." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 237, + 167, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 237, + 167, + 249 + ], + "spans": [ + { + "bbox": [ + 55, + 237, + 167, + 249 + ], + "type": "text", + "content": "3.2. 
Video Transformer" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 255, + 294, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 255, + 294, + 326 + ], + "spans": [ + { + "bbox": [ + 55, + 255, + 294, + 326 + ], + "type": "text", + "content": "We employ a Vision Transformer (ViT) [44] to encode the spatio-temporal information in the face region. Let ViT(\\cdot) denote this encoding function. The input " + }, + { + "bbox": [ + 55, + 255, + 294, + 326 + ], + "type": "inline_equation", + "content": "\\mathbf{X} \\in \\mathbb{R}^{T \\times H \\times W \\times 3}" + }, + { + "bbox": [ + 55, + 255, + 294, + 326 + ], + "type": "text", + "content": " is chunked into patches, embedded, and equipped with positional encodings before passing through multiple transformer blocks. We obtain:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 335, + 294, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 335, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 119, + 335, + 294, + 350 + ], + "type": "interline_equation", + "content": "\\mathbf {Z} = \\operatorname {V i T} (\\mathbf {X}) \\in \\mathbb {R} ^ {T \\times D}, \\tag {2}", + "image_path": "d98f970f738a90fde074ddb4e45b53b8ea4fcbae49faa5b480ef340f5ba38ad0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 360, + 295, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 360, + 295, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 360, + 295, + 407 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 360, + 295, + 407 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 55, + 360, + 295, + 407 + ], + "type": "text", + "content": " is the transformer output dimension per frame. During training, we randomly mask portions of the input patches to promote robust feature learning and improve generalization." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 415, + 295, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 415, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 415, + 295, + 441 + ], + "type": "text", + "content": "3.3. Adapter Network with Temporal Downsampling" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 445, + 295, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 445, + 295, + 494 + ], + "spans": [ + { + "bbox": [ + 55, + 445, + 295, + 494 + ], + "type": "text", + "content": "The sequence length " + }, + { + "bbox": [ + 55, + 445, + 295, + 494 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 55, + 445, + 295, + 494 + ], + "type": "text", + "content": " of the ViT output can be prohibitively large for the subsequent CTC module. To reduce the temporal dimension, we apply a sequence of 1D convolutions and pooling layers, as shown in Table ?? Formally," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 97, + 502, + 294, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 502, + 294, + 517 + ], + "spans": [ + { + "bbox": [ + 97, + 502, + 294, + 517 + ], + "type": "interline_equation", + "content": "\\mathbf {G} _ {\\text {d o w n}} = \\operatorname {A d a p t} (\\mathbf {Z}) \\in \\mathbb {R} ^ {T ^ {\\prime} \\times C _ {\\text {a d p a t e r}}}, \\tag {3}", + "image_path": "0a33c65084aaf963147e3c4918ff05ab8d8b6d0d865dba5cdba3a2eed2f292ae.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "spans": [ + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "type": "inline_equation", + "content": "T' < T" + }, + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "type": "text", + "content": " 
and " + }, + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{adapter}}" + }, + { + "bbox": [ + 55, + 527, + 295, + 552 + ], + "type": "text", + "content": " is an intermediate feature dimension aligned to the CTC head input." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 559, + 129, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 559, + 129, + 571 + ], + "spans": [ + { + "bbox": [ + 55, + 559, + 129, + 571 + ], + "type": "text", + "content": "3.4. CTC Head" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 577, + 295, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 577, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 577, + 295, + 601 + ], + "type": "text", + "content": "An MLP with one hidden layer transforms " + }, + { + "bbox": [ + 55, + 577, + 295, + 601 + ], + "type": "inline_equation", + "content": "\\mathbf{G}_{\\mathrm{down}} \\in \\mathbb{R}^{T' \\times C_{\\mathrm{adapter}}} \\text{ into phoneme logits:}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 88, + 610, + 294, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 610, + 294, + 625 + ], + "spans": [ + { + "bbox": [ + 88, + 610, + 294, + 625 + ], + "type": "interline_equation", + "content": "\\mathbf {H} _ {\\mathrm {c t c}} = \\operatorname {M L P} \\left(\\mathbf {G} _ {\\text {d o w n}}\\right) \\in \\mathbb {R} ^ {T ^ {\\prime} \\times N _ {\\text {p h o n e m e s}}}, \\tag {4}", + "image_path": "e6347d20a541ac7d0e217c412e04fef76cad5fdf8b0dec15c141181dbd205210.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "type": 
"inline_equation", + "content": "N_{\\text{phonemes}}" + }, + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "type": "text", + "content": " is the size of our phoneme set (39 English phonemes plus a blank symbol). We then apply a logarithmic softmax along the phoneme dimension to obtain log probabilities " + }, + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "type": "inline_equation", + "content": "\\log p_{\\text{ctc}}(ph_t \\mid \\mathbf{X})" + }, + { + "bbox": [ + 55, + 635, + 295, + 684 + ], + "type": "text", + "content": " at each time step." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "Since lip movements do not align cleanly with discrete phoneme boundaries, we adopt the Connectionist Temporal" + } + ] + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 316, + 70, + 565, + 182 + ], + "blocks": [ + { + "bbox": [ + 316, + 70, + 565, + 182 + ], + "lines": [ + { + "bbox": [ + 316, + 70, + 565, + 182 + ], + "spans": [ + { + "bbox": [ + 316, + 70, + 565, + 182 + ], + "type": "table", + "html": "
StageModuleIn → Out ChannelsKernelStridePaddingDownsample FactorOutput Length
1Conv1dfeature_size → adapter_dim522×2L/2
BatchNorm1dadapter_dim → adapter_dim---
ReLU----
2Conv1dadapter_dim → adapter_dim321×2L/4
BatchNorm1dadapter_dim → adapter_dim---
ReLU----
3Conv1dadapter_dim → adapter_dim321×2L/8
BatchNorm1dadapter_dim → adapter_dim---
ReLU----
4Conv1dadapter_dim → adapter_dim331×3L/24
BatchNorm1dadapter_dim → adapter_dim---
ReLU----
5AvgPool1dadapter_dim → adapter_dim580×8L/192
6Linearadapter_dim → CTC_dim---×1L/192
ReLU----
", + "image_path": "1b981d1a719a18feb087fe2df03f3b6b59d1263151fbcdd9f66245eeb8421ddb.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "lines": [ + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "spans": [ + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "type": "text", + "content": "Table 1. Architecture of the adapter for temporal downsampling, where the input length is " + }, + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "type": "inline_equation", + "content": "T = 1568" + }, + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "type": "text", + "content": " and final output length is " + }, + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "type": "inline_equation", + "content": "T = 8" + }, + { + "bbox": [ + 313, + 190, + 553, + 222 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 314, + 242, + 440, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 242, + 440, + 254 + ], + "spans": [ + { + "bbox": [ + 314, + 242, + 440, + 254 + ], + "type": "text", + "content": "Classification (CTC) loss [18]:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 353, + 258, + 553, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 258, + 553, + 287 + ], + "spans": [ + { + "bbox": [ + 353, + 258, + 553, + 287 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {C T C}} = - \\ln \\left(\\sum_ {\\alpha \\in \\mathcal {A} (\\mathbf {P h})} P _ {\\mathrm {c t c}} (\\alpha \\mid \\mathbf {X})\\right), \\tag {5}", + "image_path": "e828c004fa14e4bbe1ffccee8205180351bb807c59dc39275620f70abd710a44.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "spans": [ + { + "bbox": [ + 
313, + 293, + 555, + 377 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{Ph} = (ph_1, \\ldots, ph_m)" + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "text", + "content": " is the ground-truth phoneme sequence, " + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\mathbf{Ph})" + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "text", + "content": " is the set of all valid alignments, and " + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{ctc}}(\\alpha \\mid \\mathbf{X})" + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "text", + "content": " is the probability of alignment " + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "text", + "content": ". Minimizing " + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{CTC}}" + }, + { + "bbox": [ + 313, + 293, + 555, + 377 + ], + "type": "text", + "content": " enables the model to learn frame-to-phoneme mappings without explicit per-frame labels. At inference, we perform beam search over the CTC log probabilities to decode the final phoneme sequence." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 383, + 553, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 383, + 553, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 383, + 553, + 407 + ], + "type": "text", + "content": "3.5. 
Phoneme-to-Sentence Reconstruction Using LLM" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "type": "text", + "content": "The predicted phoneme sequence " + }, + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{Ph}}" + }, + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "type": "text", + "content": " is derived by decoding the CTC logits from Eq. (4) via beam search. We then feed " + }, + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{Ph}}" + }, + { + "bbox": [ + 313, + 411, + 554, + 461 + ], + "type": "text", + "content": " into a Large Language Model (LLM) that is finetuned to map phonemes to sentences:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 353, + 467, + 553, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 467, + 553, + 483 + ], + "spans": [ + { + "bbox": [ + 353, + 467, + 553, + 483 + ], + "type": "interline_equation", + "content": "\\mathbf {S} = \\operatorname {L L M} \\left(\\widehat {\\mathbf {P h}}\\right) = \\left(s _ {1}, s _ {2}, \\dots , s _ {M}\\right). \\tag {6}", + "image_path": "2cd34f4a1f29a870095a9b7a9b7e88242a8be188da04af7fa981ffb33613566f.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "text", + "content": "LoRA Fine-Tuning. 
To efficiently adapt the LLM, we employ Low-Rank Adaptation (LoRA) [22], injecting two low-rank matrices " + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "text", + "content": " into each linear layer. This greatly reduces the number of trainable parameters compared to full-model fine-tuning. Specifically, for a linear layer " + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{W}_{\\mathrm{orig}} \\in \\mathbb{R}^{d \\times d}" + }, + { + "bbox": [ + 313, + 494, + 554, + 567 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 328, + 572, + 553, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 572, + 553, + 586 + ], + "spans": [ + { + "bbox": [ + 328, + 572, + 553, + 586 + ], + "type": "interline_equation", + "content": "\\mathbf {W} ^ {\\prime} = \\mathbf {W} _ {\\text {o r i g}} + \\mathbf {A B}, \\quad \\mathbf {A} \\in \\mathbb {R} ^ {d \\times r}, \\mathbf {B} \\in \\mathbb {R} ^ {r \\times d}, \\tag {7}", + "image_path": "ccebbd6ae82c0ccf4ced8b2f917942d22b673431ca3a83a6965884577103095f.jpg" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "spans": [ + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "inline_equation", + "content": "r \\ll d" + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "text", + "content": ". 
Only " + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 313, + 592, + 547, + 604 + ], + "type": "text", + "content": " are updated during training." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "content": "Pretraining LLM Objective. We fine-tune the LLM on a large phoneme " + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "content": " sentence corpus generated from WikiText [34]. Let " + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{S} = (s_1, \\ldots, s_M)" + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "content": " be the ground-truth sentence for a phoneme sequence " + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "inline_equation", + "content": "\\mathbf{Ph}" + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "content": ". 
We employ the cross-entropy loss:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 358, + 683, + 553, + 715 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 683, + 553, + 715 + ], + "spans": [ + { + "bbox": [ + 358, + 683, + 553, + 715 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {C E}} = - \\sum_ {t = 1} ^ {M} \\ln p \\left(s _ {t} \\mid \\mathbf {P h}, s _ {1: t - 1}\\right), \\tag {8}", + "image_path": "848cf371eeba7b5f880bfcf43d1c8149917d637217d67f488bf1a031fb1a6cbe.jpg" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2849" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "inline_equation", + "content": "p(s_{t} \\mid \\mathbf{Ph}, s_{1:t-1})" + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "text", + "content": " is the likelihood of predicting the correct word " + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "text", + "content": ", given the phoneme sequence and previously generated words. 
During inference, we prompt the LLM with " + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{Ph}}" + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "text", + "content": " to generate " + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{S}}" + }, + { + "bbox": [ + 55, + 72, + 294, + 122 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 129, + 170, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 129, + 170, + 142 + ], + "spans": [ + { + "bbox": [ + 55, + 129, + 170, + 142 + ], + "type": "text", + "content": "3.6. Training Procedure" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "spans": [ + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "type": "text", + "content": "Stage 1: Video " + }, + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "type": "text", + "content": " Phoneme. We train the visual extractor, adapter, and CTC head to minimize " + }, + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{CTC}}" + }, + { + "bbox": [ + 55, + 146, + 295, + 207 + ], + "type": "text", + "content": " (Eq. 5) on video-phoneme pairs. By learning without strict alignment constraints, the model remains flexible to a wide range of speaking speeds and styles." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "type": "text", + "content": "Stage 2: Phoneme " + }, + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "type": "text", + "content": " Sentence. We independently fine-tune the LLM via cross-entropy loss (Eq. 8) on text-based phoneme " + }, + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 217, + 295, + 277 + ], + "type": "text", + "content": " sentence datasets. This leverages easily obtained text data, allowing the LLM to learn phonetic-linguistic mappings separately from visual modeling." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 286, + 271, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 286, + 271, + 298 + ], + "spans": [ + { + "bbox": [ + 55, + 286, + 271, + 298 + ], + "type": "text", + "content": "Inference. 
We compose the trained modules to form:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 124, + 308, + 227, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 308, + 227, + 323 + ], + "spans": [ + { + "bbox": [ + 124, + 308, + 227, + 323 + ], + "type": "interline_equation", + "content": "\\mathbf {X} \\xrightarrow {f _ {\\mathrm {v i s}}} \\widehat {\\mathbf {P h}} \\xrightarrow {f _ {\\mathrm {L L M}}} \\widehat {\\mathbf {S}},", + "image_path": "d97a329befb0d97f356cbbcd302d4d8d1363e4c1882506d7d2ad95ea4b768c5d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "spans": [ + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{Ph}}" + }, + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "type": "text", + "content": " is decoded from CTC logits, and " + }, + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{S}}" + }, + { + "bbox": [ + 55, + 332, + 295, + 394 + ], + "type": "text", + "content": " is the final sentence. This approach is highly modular and data-efficient, as each stage (visual and linguistic) is learned with its own objective and dataset, yet easily integrated at inference time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 405, + 137, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 405, + 137, + 418 + ], + "spans": [ + { + "bbox": [ + 55, + 405, + 137, + 418 + ], + "type": "text", + "content": "4. 
Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 426, + 295, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 295, + 449 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 295, + 449 + ], + "type": "text", + "content": "This section outlines the datasets and pre-processing methods used for training the model architecture." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 456, + 101, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 456, + 101, + 468 + ], + "spans": [ + { + "bbox": [ + 55, + 456, + 101, + 468 + ], + "type": "text", + "content": "4.1. Data" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 474, + 295, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 295, + 521 + ], + "type": "text", + "content": "The visual feature extractor, the adapter with downsampling, and CTC head are trained on the LRS2 [47] and LRS3 [3] datasets. The LLM is fine-tuned on the WikiText [34] dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 528, + 295, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 528, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 528, + 295, + 624 + ], + "type": "text", + "content": "LRS2 [47]: The LRS2 dataset consists of 144,482 video clips of spoken sentences from BBC television, consisting of approximately 224.5 hours of footage and each sentences is up to 100 characters in length. The videos are divided into a pre-training set with 96,318 utterances (195 hours), a training set with 45,839 utterances (28 hours), a validation set with 1,082 utterances (0.6 hours) and a test set with 1,243 utterances (0.5 hours)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 629, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 295, + 714 + ], + "type": "text", + "content": "LRS3 [3]: The LRS3 dataset describes the largest public audio-visual English dataset collected consists of clips from over 5,000 TED and TEDx talks totaling 438.9 hours. It contains 438.9 hours with 151,819 utterances. Specifically, there are 118,516 utterances in the 'pre-train' set (408 hours), 31,982 utterances in the 'train-val' set (30 hours) and 1,321 utterances in the 'test' set (0.9 hours)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 72, + 553, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 109 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 109 + ], + "type": "text", + "content": "In our setting, unlike other approaches, we do not utilise the pre-training set and train our model on only the 28-hour and 30-hour partitions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 114, + 553, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 114, + 553, + 174 + ], + "spans": [ + { + "bbox": [ + 313, + 114, + 553, + 174 + ], + "type": "text", + "content": "Phoneme Sentence Pairs. Finally, the dataset used for training the LLM was the WikiText [34] dataset with sentences converted into lists of phonemes using the CMU dictionary [52]. These phonemes were then masked and paired with the original sentences." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 181, + 427, + 193 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 181, + 427, + 193 + ], + "spans": [ + { + "bbox": [ + 313, + 181, + 427, + 193 + ], + "type": "text", + "content": "4.2. 
Evaluation Metrics" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 198, + 553, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 198, + 553, + 282 + ], + "spans": [ + { + "bbox": [ + 313, + 198, + 553, + 282 + ], + "type": "text", + "content": "Following previous works [11, 47] we adopt Word Error Rate [23] (WER) as our evaluation metric for Lip-Reading. WER calculates the percentage of errors in the predicted text compared to the ground truth, accounting for substitutions, insertions, and deletions. Similar to previous studies [3, 11, 41, 42, 47], we report results on all recent AV-ASR and V-ASR approaches." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 293, + 367, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 293, + 367, + 304 + ], + "spans": [ + { + "bbox": [ + 314, + 293, + 367, + 304 + ], + "type": "text", + "content": "5. Results" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 313, + 553, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 313, + 553, + 338 + ], + "spans": [ + { + "bbox": [ + 313, + 313, + 553, + 338 + ], + "type": "text", + "content": "In this section, we present the results of our method and compare them against those of existing SOTA approaches." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 342, + 554, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 554, + 414 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 554, + 414 + ], + "type": "text", + "content": "Public vs Private Data on LRS3 [3]: In Table 1, we compare different V-ASR and AV-ASR models based on their WER on the LRS3 dataset, we split the models into two categories; Fully Supervised models with publicly available data and models trained on large-scale non-publicly available datasets." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "type": "text", + "content": "The Fully Supervised Models rely solely on publicly available labelled datasets with the best WER being " + }, + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "type": "inline_equation", + "content": "40.6\\%" + }, + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "type": "text", + "content": ", which was achieved by VTP [41]. The non-public dataset models leverage extensive, proprietary datasets, achieving much lower WERs with the best being " + }, + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "type": "inline_equation", + "content": "12.5\\%" + }, + { + "bbox": [ + 313, + 420, + 553, + 480 + ], + "type": "text", + "content": " by LP [10]." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 486, + 554, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 554, + 582 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 554, + 582 + ], + "type": "text", + "content": "LRS3 [3]: In Table 1 we compare our results against other methods based on results from the LRS3 dataset. The results in Table 1 demonstrate a consistent trend: models incorporating extensive self-supervised pre-training followed by fine-tuning achieve lower WERs than those using only supervised approaches. However, our model achieves the lowest WER on the LRS3 dataset at " + }, + { + "bbox": [ + 313, + 486, + 554, + 582 + ], + "type": "inline_equation", + "content": "18.7\\%" + }, + { + "bbox": [ + 313, + 486, + 554, + 582 + ], + "type": "text", + "content": " without self-supervised pre-training or additional fine-tuning data." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 587, + 554, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 587, + 554, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 587, + 554, + 694 + ], + "type": "text", + "content": "LRS2 [47]: In Table 2, we compare our method against several other state-of-the-art approaches for visual-only lip reading, focusing on Word Error Rate (WER) on the LRS2 dataset. From Table 2 we can see that increasing the amount of data available helps improve a model's WER. However, we can also see that even without increasing the size of the dataset, our model still outperforms other models that only use publicly available data, achieving the lowest WER at " + }, + { + "bbox": [ + 313, + 587, + 554, + 694 + ], + "type": "inline_equation", + "content": "20.8\\%" + }, + { + "bbox": [ + 313, + 587, + 554, + 694 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 701, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 701, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 314, + 701, + 553, + 713 + ], + "type": "text", + "content": "Therefore as shown in Tables 2 and 3, we can see that our" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2850" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 304, + 365 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 304, + 365 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 304, + 365 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 304, + 365 + ], + "type": "table", + "html": "
MethodUnlabeled (hrs)Labeled (hrs)WER (%)
Fully supervised models with publicly available data
ASR distillation [4]-59068.8
Conv-Seq2Seq [60]-85560.1
Discriminative AVSR [57]-59057.8
Hyb. + Conformer [29]-59043.3
VTP [41]-69840.6
Trained on large-scale non-publicly available datasets
Deep-AV-SR [2]-1,51958.9
Large-scale AV-SR [46]-3,88655.1
RNN-T [30]-31,00033.6
VTP [41]-2,67630.7
ViT-3D [44]-90,00017.0
LP [10]-1,000,00012.5
Self-supervised pre-training + Supervised fine-tuning on LRS3
LiRA [28]4333071.9
ASR distillation [4]33459059.8
LiteVSR [24]6395945.7
ES3 [61]4333043.5
AV-HuBERT Large [45]1,7593032.5
Lip2Vec [13]1,7593031.2
Sub-Word[41]2,6762,67630.7
SynthVSR [25]3,65243827.9
Whisper [42]1,7593025.5
Auto-AVSR [27]1,75943325.0
LLaMA-AVSR [8]-175624.0
RAVeN [19]175943323.1
USR [20]1,32643321.5
Supervised finetuning only on LRS3
Ours-3018.7
", + "image_path": "4bb0f2d946f065d9dbc0973f67487c4e9b6a98182344b3591187de9a2b301982.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 78, + 503, + 104, + 529 + ], + "blocks": [ + { + "bbox": [ + 78, + 503, + 104, + 529 + ], + "lines": [ + { + "bbox": [ + 78, + 503, + 104, + 529 + ], + "spans": [ + { + "bbox": [ + 78, + 503, + 104, + 529 + ], + "type": "image", + "image_path": "2309680e9fc8df15b81d4649f6ee4748410ca448eec1e3eceae9d15922ee1af3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 105, + 504, + 119, + 529 + ], + "blocks": [ + { + "bbox": [ + 105, + 504, + 119, + 529 + ], + "lines": [ + { + "bbox": [ + 105, + 504, + 119, + 529 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 119, + 529 + ], + "type": "image", + "image_path": "7754bf5651d54cce94bc54485751dcdadbf2b73bbffaf38ff8144f09528b3941.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 120, + 503, + 138, + 529 + ], + "blocks": [ + { + "bbox": [ + 120, + 503, + 138, + 529 + ], + "lines": [ + { + "bbox": [ + 120, + 503, + 138, + 529 + ], + "spans": [ + { + "bbox": [ + 120, + 503, + 138, + 529 + ], + "type": "image", + "image_path": "5241836870d9d791c95f8c9159eb00903de7d664b498f268b9fcd5a3aec71514.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 138, + 503, + 155, + 529 + ], + "blocks": [ + { + "bbox": [ + 138, + 503, + 155, + 529 + ], + "lines": [ + { + "bbox": [ + 138, + 503, + 155, + 529 + ], + "spans": [ + { + "bbox": [ + 138, + 503, + 155, + 529 + ], + "type": "image", + "image_path": "276819e67a971584a49e4d208e6f4455af7997117101357a4747b9be5c95485c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + 
"type": "image", + "bbox": [ + 156, + 503, + 172, + 529 + ], + "blocks": [ + { + "bbox": [ + 156, + 503, + 172, + 529 + ], + "lines": [ + { + "bbox": [ + 156, + 503, + 172, + 529 + ], + "spans": [ + { + "bbox": [ + 156, + 503, + 172, + 529 + ], + "type": "image", + "image_path": "513f852c48a8ad848e828353e103328bee41306c6fde1fbc3a53f93a75960893.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 173, + 503, + 189, + 529 + ], + "blocks": [ + { + "bbox": [ + 173, + 503, + 189, + 529 + ], + "lines": [ + { + "bbox": [ + 173, + 503, + 189, + 529 + ], + "spans": [ + { + "bbox": [ + 173, + 503, + 189, + 529 + ], + "type": "image", + "image_path": "015424b644c8663954efc1f779652ecdfffb7b59c66d495a4885f69ecca87fda.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 189, + 503, + 205, + 529 + ], + "blocks": [ + { + "bbox": [ + 189, + 503, + 205, + 529 + ], + "lines": [ + { + "bbox": [ + 189, + 503, + 205, + 529 + ], + "spans": [ + { + "bbox": [ + 189, + 503, + 205, + 529 + ], + "type": "image", + "image_path": "0aa3d8102ac7868c0e1bcdc5bebd27111bf0d66867bd2c741b52605324fd8002.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 206, + 503, + 222, + 529 + ], + "blocks": [ + { + "bbox": [ + 206, + 503, + 222, + 529 + ], + "lines": [ + { + "bbox": [ + 206, + 503, + 222, + 529 + ], + "spans": [ + { + "bbox": [ + 206, + 503, + 222, + 529 + ], + "type": "image", + "image_path": "4cf7ce9a562052ae9dd55dc21fbc0e6a62e0fbb85558cbb0d60521588ee3f667.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 223, + 503, + 239, + 529 + ], + "blocks": [ + { + "bbox": [ + 223, + 503, + 239, + 529 + ], + "lines": [ + { + "bbox": [ + 223, + 503, + 239, + 529 + ], + 
"spans": [ + { + "bbox": [ + 223, + 503, + 239, + 529 + ], + "type": "image", + "image_path": "0a1a3fecf95803ab3592d83a37edcb4301f5f641561753b2b269d90f613aa441.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 240, + 503, + 257, + 529 + ], + "blocks": [ + { + "bbox": [ + 240, + 503, + 257, + 529 + ], + "lines": [ + { + "bbox": [ + 240, + 503, + 257, + 529 + ], + "spans": [ + { + "bbox": [ + 240, + 503, + 257, + 529 + ], + "type": "image", + "image_path": "a4c6362e1c7e6d602efec1dab005d55e5828f46a9a4f415b27088b2f964df35e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 257, + 503, + 273, + 529 + ], + "blocks": [ + { + "bbox": [ + 257, + 503, + 273, + 529 + ], + "lines": [ + { + "bbox": [ + 257, + 503, + 273, + 529 + ], + "spans": [ + { + "bbox": [ + 257, + 503, + 273, + 529 + ], + "type": "image", + "image_path": "5af5aa88bf55dd63f55f2c27f5fb8caddf21de23c2f3aca92a2e4b6dfa0146fa.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 274, + 503, + 290, + 529 + ], + "blocks": [ + { + "bbox": [ + 274, + 503, + 290, + 529 + ], + "lines": [ + { + "bbox": [ + 274, + 503, + 290, + 529 + ], + "spans": [ + { + "bbox": [ + 274, + 503, + 290, + 529 + ], + "type": "image", + "image_path": "861cf9126b1057f0a3876f194369d5f406de253a33de9d56ccb82e45cfed29e5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 290, + 503, + 306, + 529 + ], + "blocks": [ + { + "bbox": [ + 290, + 503, + 306, + 529 + ], + "lines": [ + { + "bbox": [ + 290, + 503, + 306, + 529 + ], + "spans": [ + { + "bbox": [ + 290, + 503, + 306, + 529 + ], + "type": "image", + "image_path": "9fa901c1b1ede2859a34c2e0b0be5fd01e9a440a82cb5245f16a3afbdcb64885.jpg" + } + ] + } + ], + 
"index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 58, + 536, + 306, + 613 + ], + "blocks": [ + { + "bbox": [ + 55, + 373, + 296, + 495 + ], + "lines": [ + { + "bbox": [ + 55, + 373, + 296, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 373, + 296, + 495 + ], + "type": "text", + "content": "Table 2. Comparison of Word Error Rate (WER) across different models for visual-only speech recognition on the LRS3 dataset. The table is divided into three sections: fully supervised models trained on publicly available data, models trained on large-scale non-public datasets, and models that leverage self-supervised pre-training with supervised fine-tuning on LRS3 [3]. Each entry details the amount of labelled and unlabelled data used and the resulting WER. Our approach, tested with various language models, achieves the lowest WER of " + }, + { + "bbox": [ + 55, + 373, + 296, + 495 + ], + "type": "inline_equation", + "content": "22.1\\%" + }, + { + "bbox": [ + 55, + 373, + 296, + 495 + ], + "type": "text", + "content": " with the Llama 3.2-3B model, demonstrating the effectiveness of our phoneme-based, LLM-assisted approach." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 536, + 306, + 613 + ], + "lines": [ + { + "bbox": [ + 58, + 536, + 306, + 613 + ], + "spans": [ + { + "bbox": [ + 58, + 536, + 306, + 613 + ], + "type": "table", + "html": "
Predicted \nPhonemeW, AA, ZDH, AHK, AY, N, DAH, V
Predicted \nWordWasthekindof
GTWasthekindof
Predicted \nPhonemeAH, B, S, EH, SH, \nAH, NDH, AE, THH, AE, DHH, ER
Predicted \nWordobsessionthathadher
GTobsessionthathadher
", + "image_path": "4409cabae598a4e370a820bb58e44f6d466d251279aa3f089505cb0d8a8076d4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 317, + 70, + 563, + 206 + ], + "blocks": [ + { + "bbox": [ + 55, + 621, + 295, + 687 + ], + "lines": [ + { + "bbox": [ + 55, + 621, + 295, + 687 + ], + "spans": [ + { + "bbox": [ + 55, + 621, + 295, + 687 + ], + "type": "text", + "content": "Figure 3. Example of the model's phonetic and sentence outputs from a sample in the LRS3 dataset [3]. The table illustrates the model's ability to predict a sequence of phonemes from visual input, which are then reconstructed into a coherent sentence by the LLM. In this example all of the phonemes are predicted correctly and the words are recreated correctly." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 70, + 563, + 206 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 563, + 206 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 563, + 206 + ], + "type": "table", + "html": "
MethodUnlabeled (hrs)Labeled (hrs)WER (%)
Fully supervised models with publicly available data
Auto-AV-SR [27]-81827.9
Auto-AV-SR [27]-344814.6
Self-supervised pre-training + Supervised finetuning on LRS2 for AVSR
LiRA [28]1,75943338.8
ES3 [61]1,75922326.7
Sub-Word [41]2,6762,67622.6
RAVeN [19]1,75922317.9
USR [20]1,75922315.4
Supervised finetuning only on LRS2
Ours-2820.8
", + "image_path": "c720ad6324dd4c6cc3507a3073064c3edc26b7b5fb6b9cf8752d83efaa366110.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 214, + 555, + 292 + ], + "lines": [ + { + "bbox": [ + 313, + 214, + 555, + 292 + ], + "spans": [ + { + "bbox": [ + 313, + 214, + 555, + 292 + ], + "type": "text", + "content": "Table 3. Comparison of Word Error Rate (WER) for various self-supervised pre-training and supervised fine-tuning methods evaluated on the LRS2 [47] dataset. Our method achieves competitive performance with other methods, without the requirement of extensive pre-training, additional labelled data, or additional audio modality as in the best performing approaches. We are the only method to train on only the LRS2 train dataset." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 312, + 555, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 312, + 555, + 516 + ], + "spans": [ + { + "bbox": [ + 313, + 312, + 555, + 516 + ], + "type": "text", + "content": "method provides a lower WER with the same amount, or less data, without the requirement of self-supervised pretraining as current approaches [4, 13, 24, 25, 27, 28, 42, 45]. This large increase in performance and generalisation can be explained by the powerful combination of the multi-task objectives and the ability of the LLM to reconstruct sentences accurately even when phonemes are predicted incorrectly. In Fig. 4, we find that the phonetic output of the visual encoder is sometimes incorrect or misses phonemes, but the LLM is able to still reconstruct the correct word (as in the example " + }, + { + "bbox": [ + 313, + 312, + 555, + 516 + ], + "type": "inline_equation", + "content": "z \\rightarrow s" + }, + { + "bbox": [ + 313, + 312, + 555, + 516 + ], + "type": "text", + "content": "). 
We also show how the model still struggles with tricky homophones such as your and you're. In Fig. 3, we show an additional example of the model's outputs at both phonetic and word levels, demonstrating how phonemes are correctly predicted and combined by the LLM for sentence reconstruction. For further examples please see the appendix." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 522, + 382, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 522, + 382, + 533 + ], + "spans": [ + { + "bbox": [ + 313, + 522, + 382, + 533 + ], + "type": "text", + "content": "5.1. Ablations" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "text", + "content": "In this section, we aim to understand the performance contribution of each component in the network and their performance on visual " + }, + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "text", + "content": " phoneme prediction and phoneme " + }, + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "text", + "content": " word reconstruction, alongside ablations related to architectural decisions, and the limitations of the approach." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 605, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 555, + 713 + ], + "type": "text", + "content": "Visual " + }, + { + "bbox": [ + 313, + 605, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 605, + 555, + 713 + ], + "type": "text", + "content": " Phoneme: The confusion matrix in Fig. 5 shows phoneme prediction performance of the model on the LRS3 [3] dataset. The matrix indicates that some phonemes are misclassified more frequently than others, as seen by the off-diagonal elements, with the worst missed classifications being highlighted with a red bounding box. This bias could be due to the similarity in the visual representation of these sounds, making it challenging for the model to distinguish between them. For instance, /S/ and /Z/ may often be con-" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 314, + 757 + ], + "type": "text", + "content": "2851" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 69, + 306, + 95 + ], + "blocks": [ + { + "bbox": [ + 78, + 69, + 306, + 95 + ], + "lines": [ + { + "bbox": [ + 78, + 69, + 306, + 95 + ], + "spans": [ + { + "bbox": [ + 78, + 69, + 306, + 95 + ], + "type": "image", + "image_path": "4e8fc1ec0b0814031a2d09ff8bf89e914bb5512b94eccdc80d0511cedb025fbc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 187, + 296, + 266 + ], + "lines": [ + { + "bbox": [ + 55, + 187, + 296, + 266 + ], + "spans": [ + { + "bbox": [ + 55, + 187, + 296, + 
266 + ], + "type": "text", + "content": "Figure 4. Comparison of the model's phonetic and sentence outputs with ground truth from a sample in the LRS3 [3] dataset. Red shows an incorrect prediction, M shows a missing prediction and green shows a correction from phonemes to words. In this example, even though the model incorrectly predicts certain phonemes, the LLM can correctly recreate the word but struggles to recreate homophones." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 57, + 102, + 306, + 178 + ], + "blocks": [ + { + "bbox": [ + 57, + 102, + 306, + 178 + ], + "lines": [ + { + "bbox": [ + 57, + 102, + 306, + 178 + ], + "spans": [ + { + "bbox": [ + 57, + 102, + 306, + 178 + ], + "type": "table", + "html": "
Predicted \nPhonemeY, UH, RJH, AH, Z, TW, AH, NAH, V
Predicted \nWordYourjustoneof
GTYou'rejustoneof
Predicted \nPhonemeDH, AHP, IY, P, <M>, LHH, UWHH, AE, S, N, T
Predicted \nWordthepeoplewhohasn't
GTthepeoplewhohasn't
", + "image_path": "8e711fe5e567d49e2d32bb3875ffaa3e363b360ab9840fb1300f0144e21978e0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 285, + 295, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 285, + 295, + 309 + ], + "spans": [ + { + "bbox": [ + 55, + 285, + 295, + 309 + ], + "type": "text", + "content": "fused due to similar lip movements, leading to miss classifications between these phonemes." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 55, + 317, + 307, + 548 + ], + "blocks": [ + { + "bbox": [ + 55, + 317, + 307, + 548 + ], + "lines": [ + { + "bbox": [ + 55, + 317, + 307, + 548 + ], + "spans": [ + { + "bbox": [ + 55, + 317, + 307, + 548 + ], + "type": "image", + "image_path": "f7d26b30e521eb6a97cd06f3b94f654e12b2cf2e2cd055439660d384e40abed9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 555, + 295, + 601 + ], + "lines": [ + { + "bbox": [ + 55, + 555, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 295, + 601 + ], + "type": "text", + "content": "Figure 5. Confusion matrix showing the performance on isolated phonemes of the LRS3 [3] dataset. We observe a very high match rate between the predicted phonemes and the ground truth. In red, we show the most difficult phonemes for our model to identify." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "type": "text", + "content": "Phoneme " + }, + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 611, + 295, + 684 + ], + "type": "text", + "content": " Word: In Table 4 we take the top 5 words in alphabetical order that are misclassified when predicting words directly from phonemes after the LLM pretraining stage. We can observe that these are words that may occur infrequently in the training set and are missclassified due to some phonetic similarities between the words." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "Different LLMs: In this ablation study, we evaluate three different Language models on our model architecture to" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 317, + 69, + 563, + 133 + ], + "blocks": [ + { + "bbox": [ + 317, + 69, + 563, + 133 + ], + "lines": [ + { + "bbox": [ + 317, + 69, + 563, + 133 + ], + "spans": [ + { + "bbox": [ + 317, + 69, + 563, + 133 + ], + "type": "table", + "html": "
PhonemesTrue WordPredicted Word
/EH/ /R/ /AH/ /N/ /S/ /AH/ /N/AaronsonErasmus
/AA/ /B/ /ER/ /G/AabergBraggartly
/AH/ /S/ /EH/ /R /AH/AcerraEsraa
/AE/ /S/ /AH/ /T/ /EY /T/AcetateAsquette
/AH/ /K/ /AH/ /S/ /T/ /AH/ /M/ /D/AccustomedAccosted
", + "image_path": "9d3d4061eb0bfc8f590cb45171de428e906f503bc03b483afafcb52a7cd45f4b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "text", + "content": "understand the impact on word error rate (WER) performance. The first model, GPT 2 [43], serves as our baseline, consisting of the default GPT 2 [43] without fine-tuning on lip-reading datasets. This model achieves a WER of " + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "inline_equation", + "content": "23.9\\%" + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "text", + "content": ", indicating the core components' foundational capabilities. The second model, Llama 3.2-1B [51] introduces fine-tuning on phoneme word pairs, enhancing the model's contextual understanding of phoneme sequences. With this addition, Llama 3.2-1B [51] has a substantial improvement, reducing the WER to " + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "inline_equation", + "content": "22.8\\%" + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "text", + "content": ". Finally, Llama 3.2-3B [51] also incorporates fine-tuning for phoneme-to-word reconstruction. However, Llama 3.2-3B [51] is a larger model with more parameters than the Llama 3.2-1B [51] model, further lowering the WER to " + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "inline_equation", + "content": "18.7\\%" + }, + { + "bbox": [ + 313, + 230, + 555, + 422 + ], + "type": "text", + "content": ". This progression highlights the significant contributions of fine-tuning for purpose-specific LLMs." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 323, + 433, + 547, + 495 + ], + "blocks": [ + { + "bbox": [ + 313, + 141, + 555, + 207 + ], + "lines": [ + { + "bbox": [ + 313, + 141, + 555, + 207 + ], + "spans": [ + { + "bbox": [ + 313, + 141, + 555, + 207 + ], + "type": "text", + "content": "Table 4. Examples of misclassification's between predicted words and ground truth based on phoneme sequences. Each row presents a sequence of phonemes, the intended (true) word, and the model's predicted word. Differences in predictions illustrate common misclassifications, often due to phonetic similarities between visually indistinct sounds." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 323, + 433, + 547, + 495 + ], + "lines": [ + { + "bbox": [ + 323, + 433, + 547, + 495 + ], + "spans": [ + { + "bbox": [ + 323, + 433, + 547, + 495 + ], + "type": "table", + "html": "
ModelParameters (B)WER (%)
GPT-2 Small [43]0.1223.9
Llama 3.2-1B [51]1.022.8
Llama 3.2-3B [51]3.018.7
", + "image_path": "cb11ee012b8e5c7758268e42e9f87c27a56e49644ba4c76ffcde0a43b1da4199.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "lines": [ + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "spans": [ + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "type": "text", + "content": "Table 5. Comparison of Word Error Rate (WER) for different large language models (LLMs) tested with our model. The table includes each model's parameter count (in billions) and resulting WER on the LRS3 [3] dataset. GPT-2 [43] Small with 0.12B parameters has the highest WER at " + }, + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "type": "inline_equation", + "content": "23.9\\%" + }, + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "type": "text", + "content": ". In contrast, Llama 3.2-3B [51] achieves the best WER of " + }, + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "type": "inline_equation", + "content": "18.7\\%" + }, + { + "bbox": [ + 313, + 502, + 555, + 591 + ], + "type": "text", + "content": ", highlighting that increased model capacity improves phoneme-to-word reconstruction accuracy." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "type": "text", + "content": "Freezing the CTC Head: Inspired by works that directly map visual features to pretrained ASR networks [42] we investigate the effect of initialising the CTC head in our model with weights from a pre-trained Wav2Vec2 ASR model [6]. The CTC head plays a crucial role in mapping visual features to the phoneme sequences, and we hypothesized that initialising and freezing its parameters could reduce computational overhead while preserving feature alignment from audio pre-training. 
To test this hypothesis a baseline model" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2852" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "content": "with a non-frozen CTC head and a comparison model with a frozen CTC head were trained and a comparison of the results was made as shown in Table 6. Freezing the CTC head led to a slight increase in WER of " + }, + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "inline_equation", + "content": "0.4\\%" + }, + { + "bbox": [ + 55, + 72, + 294, + 167 + ], + "type": "text", + "content": ", indicating a decrease in performance. This result suggests that while the frozen CTC head still provided a foundation for phoneme alignment, we did not require extensive audio pre-training to obtain superior performance." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "text", + "content": "Varying Sample Sizes: To understand the impact of additional data on performance, we vary the quantity of data used to train both the Video " + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "text", + "content": " Phoneme network and the Phoneme " + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "text", + "content": " Sentence LLM and investigate the effect this has on the WER when evaluated on the LRS3 [3] dataset. By varying the percentages of data used in training, we can see the effect on the model in Figure 6. Decreasing the amount of data used to train both models has the expected effect on the WER by increasing it. However, increasing the amount of training data for the Feature Extractor has a negligible effect after 30 hours of data. However, by increasing the amount of training data for the LLM we can also increase the effectiveness of the model and decrease the total WER down further to " + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "inline_equation", + "content": "17.5\\%" + }, + { + "bbox": [ + 56, + 175, + 296, + 378 + ], + "type": "text", + "content": ". The additional data is randomly sampled from both AvSpeech [17] for the visual encoder, and a BookCorpus [63] replica to match the quantity of data in WikiText." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 70, + 402, + 284, + 571 + ], + "blocks": [ + { + "bbox": [ + 70, + 402, + 284, + 571 + ], + "lines": [ + { + "bbox": [ + 70, + 402, + 284, + 571 + ], + "spans": [ + { + "bbox": [ + 70, + 402, + 284, + 571 + ], + "type": "image", + "image_path": "9039c2c35aa9c82d656a47cc63694fed24bdefa3cff3157abb122551594fff59.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "lines": [ + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "text", + "content": "Figure 6. Graph showing the effect of varying the sample size of the training datasets for both the Video " + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "text", + "content": " Phoneme network (in blue) and for the Phoneme " + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "text", + "content": " Sentence LLM (in red). The graph shows that decreasing the amount of data negatively impacts the model for both networks, increasing the amount of training data for the Video " + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "text", + "content": " Phoneme network has negligible effect and increasing the amount of training data for the Phoneme " + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 585, + 295, + 673 + ], + "type": "text", + "content": " Sentence LLM has a noticeable positive effect." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "content": "End-To-End: For our final study, we investigate the effect of removing the video " + }, + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "content": " phoneme stage and train the" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 553, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 168 + ], + "type": "text", + "content": "model to directly predict sentences from the video features, reproducing the method in [41] but with the same ViT encoder [44], LLama LLM [51], and limited data set. As shown in Table 6 we achieve aWER of " + }, + { + "bbox": [ + 313, + 72, + 553, + 168 + ], + "type": "inline_equation", + "content": "25.6\\%" + }, + { + "bbox": [ + 313, + 72, + 553, + 168 + ], + "type": "text", + "content": " on LRS3, demonstrating that the more parameterised transformer encoder improves performance over the CNN encoder used in [41]. However, our two stage approach still performs significantly better." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 343, + 180, + 526, + 242 + ], + "blocks": [ + { + "bbox": [ + 343, + 180, + 526, + 242 + ], + "lines": [ + { + "bbox": [ + 343, + 180, + 526, + 242 + ], + "spans": [ + { + "bbox": [ + 343, + 180, + 526, + 242 + ], + "type": "table", + "html": "
ModelWER (%)
Ours (Two Stage)18.7
ViT + LLM (End to End)25.6
Sub-Word (End to End) [41]30.7
", + "image_path": "f233a11b44b49368eb2ad65b5c63bc99b6dc35c265d76a66a54f5192fb4b4bff.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 249, + 555, + 305 + ], + "lines": [ + { + "bbox": [ + 313, + 249, + 555, + 305 + ], + "spans": [ + { + "bbox": [ + 313, + 249, + 555, + 305 + ], + "type": "text", + "content": "Table 6. Comparison of Word Error Rate (WER) between using the intermediary Phonemes and skipping the CTC head. Removing the intermediary representation increases the WER from 18.7 to 25.6, showing the importance of the intermediary step and the effectiveness of the phoneme centric fine-tuning." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 325, + 471, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 325, + 471, + 338 + ], + "spans": [ + { + "bbox": [ + 313, + 325, + 471, + 338 + ], + "type": "text", + "content": "6. Conclusion and Limitations" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "spans": [ + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "type": "text", + "content": "We have introduced a two-stage, phoneme-centric framework for visual-only lip reading that first predicts phonemes from lip movements and then reconstructs coherent sentences via a fine-tuned LLM. 
This design sharply reduces word error rates on benchmark datasets (LRS2 [47] and LRS3 [3]), providing improvements of " + }, + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "type": "inline_equation", + "content": "3.5\\%" + }, + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "type": "inline_equation", + "content": "6.8\\%" + }, + { + "bbox": [ + 313, + 345, + 555, + 453 + ], + "type": "text", + "content": " WER, respectively, over existing state-of-the-art approaches that focus on end-to-end connectionist approaches." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 461, + 556, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 461, + 556, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 461, + 556, + 594 + ], + "type": "text", + "content": "Despite these gains, the approach remains limited by the visual ambiguity of certain phonemes. Lip movements alone cannot always differentiate minimal pairs such as /s/ vs. /z/, and words that share near-identical phoneme sequences (e.g., \"Hello\" vs. \"Hallow\") can still result in misclassification. We plan to address these issues by refining the phoneme-to-word generation process, potentially through deeper linguistic context modeling and more specialized phoneme embeddings. Future work may also explore speaker-adaptive fine-tuning or multi-frame alignment strategies to better handle subtle visual distinctions." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 605, + 417, + 619 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 417, + 619 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 417, + 619 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 624, + 555, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 555, + 703 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 555, + 703 + ], + "type": "text", + "content": "This work was supported by the SNSF project 'SMILE II' (CRSII5 193686), the Innosuisse IICT Flagship (PFFS-21-47), EPSRC grant APP24554 (SignGPT-EP/Z535370/1) and through funding from Google.org via the AI for Global Goals scheme. This work reflects only the author's views and the funders are not responsible for any use that may be made of the information it contains." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2853" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 61, + 91, + 294, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 294, + 123 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 294, + 123 + ], + "type": "text", + "content": "[1] Emre Afacan and 
Gültekin Demirci. A survey on automatic lip-reading: Challenges and future directions. Multimedia Tools and Applications, 2019. 1, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 124, + 295, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 124, + 295, + 167 + ], + "spans": [ + { + "bbox": [ + 61, + 124, + 295, + 167 + ], + "type": "text", + "content": "[2] Triantafyllos Afouras, Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Deep audio-visual speech recognition. IEEE transactions on pattern analysis and machine intelligence, 2018. 3, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 168, + 295, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 168, + 295, + 211 + ], + "spans": [ + { + "bbox": [ + 62, + 168, + 295, + 211 + ], + "type": "text", + "content": "[3] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. Lrs3-ted: a large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496, 2018. 1, 3, 5, 6, 7, 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 213, + 294, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 213, + 294, + 266 + ], + "spans": [ + { + "bbox": [ + 62, + 213, + 294, + 266 + ], + "type": "text", + "content": "[4] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisserman. Asr is all you need: Cross-modal distillation for lip reading. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2143-2147. IEEE, 2020. 3, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 268, + 294, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 268, + 294, + 301 + ], + "spans": [ + { + "bbox": [ + 62, + 268, + 294, + 301 + ], + "type": "text", + "content": "[5] Yannis M Assael, Brendan Shillingford, Shimon Whiteson, and Nando De Freitas. Lipnet: End-to-end sentence-level lipreading. 
arXiv preprint arXiv:1611.01599, 2016. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 302, + 294, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 302, + 294, + 345 + ], + "spans": [ + { + "bbox": [ + 62, + 302, + 294, + 345 + ], + "type": "text", + "content": "[6] Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in neural information processing systems, 2020. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 346, + 294, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 346, + 294, + 390 + ], + "spans": [ + { + "bbox": [ + 62, + 346, + 294, + 390 + ], + "type": "text", + "content": "[7] Helen L Bear, Richard W Harvey, Barry-John Theobald, and Yuxuan Lan. Which phoneme-to-viseme maps best improve visual-only computer lip-reading? In International symposium on visual computing. Springer, 2014. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 391, + 294, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 391, + 294, + 434 + ], + "spans": [ + { + "bbox": [ + 62, + 391, + 294, + 434 + ], + "type": "text", + "content": "[8] Umberto Cappellazzo, Minsu Kim, Honglie Chen, Pingchuan Ma, Stavros Petridis, Daniele Falavigna, Alessio Brutti, and Maja Pantic. Large language models are strong audio-visual speech recognition learners, 2025. 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 435, + 294, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 435, + 294, + 468 + ], + "spans": [ + { + "bbox": [ + 62, + 435, + 294, + 468 + ], + "type": "text", + "content": "[9] Luca Cappelletta and Naomi Harte. Viseme definitions comparison for visual-only speech recognition. In 2011 19th European Signal Processing Conference. IEEE, 2011. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 469, + 294, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 469, + 294, + 523 + ], + "spans": [ + { + "bbox": [ + 57, + 469, + 294, + 523 + ], + "type": "text", + "content": "[10] Oscar Chang, Hank Liao, Dmitriy Serdyuk, Ankit Shahy, and Olivier Siohan. Conformer is all you need for visual speech recognition. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024. 5, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 525, + 294, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 525, + 294, + 546 + ], + "spans": [ + { + "bbox": [ + 57, + 525, + 294, + 546 + ], + "type": "text", + "content": "[11] Joon Son Chung and Andrew Zisserman. Lip reading in the wild. In Asian Conference on Computer Vision, 2016. 3, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 547, + 294, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 547, + 294, + 580 + ], + "spans": [ + { + "bbox": [ + 57, + 547, + 294, + 580 + ], + "type": "text", + "content": "[12] Martin Cooke, Jon Barker, Stuart Cunningham, and Xu Shao. Grid av speech corpus sample. The Journal of the Accoustical Society of America, 2013. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 581, + 294, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 581, + 294, + 646 + ], + "spans": [ + { + "bbox": [ + 57, + 581, + 294, + 646 + ], + "type": "text", + "content": "[13] Yasser Abdelaziz Dahou Djilali, Sanath Narayan, Haithem Boussaid, Ebtessam Almazrouei, and Merouane Debbah. Lip2vec: Efficient and robust visual speech recognition via latent-to-latent visual to audio representation mapping. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023. 
1, 3, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 647, + 294, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 294, + 679 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 294, + 679 + ], + "type": "text", + "content": "[14] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 2, 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 57, + 681, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 681, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 681, + 294, + 713 + ], + "type": "text", + "content": "[15] Randa El-Bialy, Daqing Chen, Souheil Fenghour, Walid Hussein, Perry Xiao, Omar H. Karam, and Bo Li. Developing phoneme-based lip-reading sentences system for silent" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 74, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 335, + 74, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 74, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 335, + 74, + 553, + 95 + ], + "type": "text", + "content": "speech recognition. CAAI Transactions on Intelligence Technology, 8(1):129-138, 2023. 3" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 96, + 553, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 96, + 553, + 149 + ], + "spans": [ + { + "bbox": [ + 317, + 96, + 553, + 149 + ], + "type": "text", + "content": "[16] Randa El-Bialy, Daqing Chen, Souheil Fenghour, Walid Hussein, Perry Xiao, Omar H Karam, and Bo Li. Developing phoneme-based lip-reading sentences system for silent speech recognition. CAAI Transactions on Intelligence Technology, 2023. 
3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 151, + 553, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 151, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 317, + 151, + 553, + 205 + ], + "type": "text", + "content": "[17] A. Ephrat, I. Mosseri, O. Lang, T. Dekel, K Wilson, A. Hassidim, W. T. Freeman, and M. Rubinstein. Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation. arXiv preprint arXiv:1804.03619, 2018. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 206, + 553, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 206, + 553, + 260 + ], + "spans": [ + { + "bbox": [ + 317, + 206, + 553, + 260 + ], + "type": "text", + "content": "[18] Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd international conference on Machine learning, 2006. 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 261, + 553, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 261, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 317, + 261, + 553, + 304 + ], + "type": "text", + "content": "[19] Alexandros Haliassos, Pingchuan Ma, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Jointly learning visual and auditory speech representations from raw data. arXiv preprint arXiv:2212.06246, 2022. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 305, + 553, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 305, + 553, + 349 + ], + "spans": [ + { + "bbox": [ + 317, + 305, + 553, + 349 + ], + "type": "text", + "content": "[20] Alexandros Haliassos, Rodrigo Mira, Honglie Chen, Zoe Landgraf, Stavros Petridis, and Maja Pantic. 
Unified speech recognition: A single model for auditory, visual, and audio-visual inputs. arXiv preprint arXiv:2411.02256, 2024. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 350, + 553, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 350, + 553, + 414 + ], + "spans": [ + { + "bbox": [ + 317, + 350, + 553, + 414 + ], + "type": "text", + "content": "[21] Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM transactions on audio, speech, and language processing, 2021. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 415, + 553, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 415, + 553, + 458 + ], + "spans": [ + { + "bbox": [ + 317, + 415, + 553, + 458 + ], + "type": "text", + "content": "[22] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 2022. 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 460, + 553, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 460, + 553, + 493 + ], + "spans": [ + { + "bbox": [ + 317, + 460, + 553, + 493 + ], + "type": "text", + "content": "[23] F. Jelinek, L. Bahl, and R. Mercer. Design of a linguistic statistical decoder for the recognition of continuous speech. IEEE Transactions on Information Theory, 21(3), 1975. 5" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 494, + 553, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 494, + 553, + 557 + ], + "spans": [ + { + "bbox": [ + 317, + 494, + 553, + 557 + ], + "type": "text", + "content": "[24] Hendrik Laux, Emil Mededovic, Ahmed Hallawa, Lukas Martin, Arne Peine, and Anke Schmeink. 
Litevsr: Efficient visual speech recognition by learning from speech representations of unlabeled data. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024. 3, 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 559, + 553, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 559, + 553, + 634 + ], + "spans": [ + { + "bbox": [ + 317, + 559, + 553, + 634 + ], + "type": "text", + "content": "[25] Xubo Liu, Egor Lakomkin, Konstantinos Vougioukas, Pingchuan Ma, Honglie Chen, Ruiming Xie, Morrie Doulaty, Niko Moritz, Jachym Kolar, Stavros Petridis, et al. Synthvsr: Scaling up visual speech recognition with synthetic supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 3, 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 636, + 553, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 636, + 553, + 690 + ], + "spans": [ + { + "bbox": [ + 317, + 636, + 553, + 690 + ], + "type": "text", + "content": "[26] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuo-Ling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019. 4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 692, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 692, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 692, + 553, + 713 + ], + "type": "text", + "content": "[27] Pingchuan Ma and Alexandros et al. Haliassos. Auto-avsr: Audio-visual speech recognition with automatic labels. 
In" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2854" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 296, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "spans": [ + { + "bbox": [ + 77, + 72, + 296, + 95 + ], + "type": "text", + "content": "IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2023. 3, 6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 96, + 296, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 96, + 296, + 129 + ], + "spans": [ + { + "bbox": [ + 57, + 96, + 296, + 129 + ], + "type": "text", + "content": "[28] Pingchuan Ma and Rodrigo et al. Mira. Lira: Learning visual speech representations from audio through self-supervision. arXiv, 2021. 3, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 130, + 296, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 130, + 296, + 174 + ], + "spans": [ + { + "bbox": [ + 56, + 130, + 296, + 174 + ], + "type": "text", + "content": "[29] Pingchuan Ma, Stavros Petridis, and Maja Pantic. End-to-end audio-visual speech recognition with conformers. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2021. 
6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 175, + 295, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 295, + 229 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 295, + 229 + ], + "type": "text", + "content": "[30] Takaki Makino, Hank Liao, Yannis Assael, Brendan Shillingford, Basilio Garcia, Otavio Braga, and Olivier Siohan. Recurrent neural network transducer for audio-visual speech recognition. In IEEE automatic speech recognition and understanding workshop (ASRU). IEEE, 2019. 3, 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 231, + 294, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 231, + 294, + 253 + ], + "spans": [ + { + "bbox": [ + 57, + 231, + 294, + 253 + ], + "type": "text", + "content": "[31] William D Marslen-Wilson. Functional parallelism in spoken word-recognition. Cognition, 25(1-2), 1987. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 57, + 254, + 295, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 254, + 295, + 286 + ], + "spans": [ + { + "bbox": [ + 57, + 254, + 295, + 286 + ], + "type": "text", + "content": "[32] William D Marslen-Wilson and Alan Welsh. Processing interactions and lexical access during word recognition in continuous speech. Cognitive psychology, 10(1), 1978. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 57, + 287, + 294, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 287, + 294, + 309 + ], + "spans": [ + { + "bbox": [ + 57, + 287, + 294, + 309 + ], + "type": "text", + "content": "[33] Harry McGurk and John MacDonald. Hearing lips and seeing voices. Nature, 1976. 
1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 57, + 310, + 295, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 310, + 295, + 343 + ], + "spans": [ + { + "bbox": [ + 57, + 310, + 295, + 343 + ], + "type": "text", + "content": "[34] Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016. 4, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 345, + 295, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 345, + 295, + 398 + ], + "spans": [ + { + "bbox": [ + 56, + 345, + 295, + 398 + ], + "type": "text", + "content": "[35] Ara V Nefian, Luhong Liang, Xiaobo Pi, Liu Xiaoxiang, Crusoe Mao, and Kevin Murphy. A coupled hmm for audiovisual speech recognition. In 2002 IEEE International Conference on Acoustics, Speech, and Signal Processing. IEEE, 2002. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 400, + 295, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 400, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 56, + 400, + 295, + 443 + ], + "type": "text", + "content": "[36] Eng-Jon Ong and Richard Bowden. Learning temporal signatures for lip reading. In 2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops). IEEE, 2011. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 445, + 295, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 445, + 295, + 488 + ], + "spans": [ + { + "bbox": [ + 56, + 445, + 295, + 488 + ], + "type": "text", + "content": "[37] Stavros Petridis and Maja Pantic. Deep complementary bottleneck features for visual speech recognition. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2016. 
3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 490, + 295, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 490, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 56, + 490, + 295, + 544 + ], + "type": "text", + "content": "[38] Stavros Petridis, Themos Stafylakis, Pingchuan Ma, Georgios Tzimiropoulos, and Maja Pantic. Audio-visual speech recognition with a hybrid ctc/attention architecture. In 2018 IEEE Spoken Language Technology Workshop (SLT). IEEE, 2018. 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 545, + 295, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 545, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 56, + 545, + 295, + 590 + ], + "type": "text", + "content": "[39] Javad Peymanfard, Vahid Saeedi, Mohammad Reza Mohammadi, Hossein Zeinali, and Nasser Mozayani. Leveraging visemes for better visual speech representation and lip reading, 2023. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 591, + 295, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 591, + 295, + 634 + ], + "spans": [ + { + "bbox": [ + 56, + 591, + 295, + 634 + ], + "type": "text", + "content": "[40] Gerasimos Potamianos, Chalapathy Neti, Guillaume Gravier, Ashutosh Garg, and Andrew W Senior. Recent advances in the automatic recognition of audiovisual speech. Proceedings of the IEEE, 91(9), 2003. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 635, + 295, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 295, + 679 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 295, + 679 + ], + "type": "text", + "content": "[41] KR Prajwal, Triantafyllos Afouras, and Andrew Zisserman. Sub-word level lip reading with visual attention. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022. 
1, 3, 5, 6, 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 681, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 681, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 681, + 295, + 713 + ], + "type": "text", + "content": "[42] KR Prajwal, Triantafyllos Afouras, and Andrew Zisserman. Speech recognition models are strong lip-readers. In Proc. Interspeech 2024, 2024. 3, 5, 6, 7" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 95 + ], + "type": "text", + "content": "[43] Alec Radford and Karthik Narasimhan. Improving language understanding by generative pre-training. arXiv, 2018. 3, 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 97, + 553, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 97, + 553, + 140 + ], + "spans": [ + { + "bbox": [ + 317, + 97, + 553, + 140 + ], + "type": "text", + "content": "[44] Dmitriy Serdyuk, Otavio Braga, and Olivier Siohan. Transformer-based video front-ends for audio-visual speech recognition for single and multi-person video. arXiv preprint arXiv:2201.10439, 2022. 3, 4, 6, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 141, + 553, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 141, + 553, + 174 + ], + "spans": [ + { + "bbox": [ + 316, + 141, + 553, + 174 + ], + "type": "text", + "content": "[45] Bowen Shi, Wei-Ning Hsu, Kushal Lakhotia, and Abdelrahman Mohamed. Learning audio-visual speech representation by masked multimodal cluster prediction. *ICLR*, 2022. 
3, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 175, + 553, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 218 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 218 + ], + "type": "text", + "content": "[46] Brendan Shillingford, Yannis Assael, Matthew W Hoffman, Thomas Paine, Cian Hughes, Utsav Prabhu, Hank Liao, Hasim Sak, Kanishka Rao, Lorrayne Bennett, et al. Large-scale visual speech recognition. InterSpeech, 2018. 3, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 220, + 553, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 220, + 553, + 262 + ], + "spans": [ + { + "bbox": [ + 316, + 220, + 553, + 262 + ], + "type": "text", + "content": "[47] Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew Zisserman. Lip reading sentences in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition, 2017. 3, 5, 6, 8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 264, + 553, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 264, + 553, + 308 + ], + "spans": [ + { + "bbox": [ + 316, + 264, + 553, + 308 + ], + "type": "text", + "content": "[48] Themos Stafylakis and Georgios Tzimiropoulos. Zero-shot keyword spotting for visual speech recognition in-the-wild. In Proceedings of the European Conference on Computer Vision (ECCV), pages 513-529, 2018. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 310, + 553, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 310, + 553, + 342 + ], + "spans": [ + { + "bbox": [ + 316, + 310, + 553, + 342 + ], + "type": "text", + "content": "[49] Kwanchiva Thangthai, Helen L Bear, and Richard Harvey. Comparing phonemes and visemes with dnn-based lipreading. arXiv preprint arXiv:1805.02924, 2018. 
3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 344, + 553, + 387 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 344, + 553, + 387 + ], + "spans": [ + { + "bbox": [ + 316, + 344, + 553, + 387 + ], + "type": "text", + "content": "[50] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. Advances in neural information processing systems, 35, 2022. 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 388, + 553, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 553, + 443 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 553, + 443 + ], + "type": "text", + "content": "[51] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 3, 7, 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 445, + 553, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 445, + 553, + 476 + ], + "spans": [ + { + "bbox": [ + 316, + 445, + 553, + 476 + ], + "type": "text", + "content": "[52] Carnegie Mellon University. The cmu pronouncing dictionary, 1993. http://www.speech.cs.cmu.edu/cgi-bin/cmudict.2,5" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 478, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 478, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 316, + 478, + 553, + 521 + ], + "type": "text", + "content": "[53] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 2017. 
3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 523, + 553, + 567 + ], + "type": "text", + "content": "[54] Michael Wand, Jan Koutnik, and Jürgen Schmidhuber. Lipreading with long short-term memory. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2016. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 568, + 553, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 568, + 553, + 612 + ], + "spans": [ + { + "bbox": [ + 316, + 568, + 553, + 612 + ], + "type": "text", + "content": "[55] Michael Wand, Jan Koutnik, and Jürgen Schmidhuber. Lipreading with long short-term memory. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2016. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 613, + 553, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 553, + 645 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 553, + 645 + ], + "type": "text", + "content": "[56] Xinshuo Weng and Kris Kitani. Learning spatio-temporal features with two-stream deep 3d cnn for lipreading. arXiv preprint arXiv:1905.02540, 2019. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 647, + 555, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 647, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 316, + 647, + 555, + 689 + ], + "type": "text", + "content": "[57] Bo Xu, Cheng Lu, Yandong Guo, and Jacob Wang. Discriminative multi-modality speech recognition. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, 2020. 
3, 6" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 691, + 553, + 713 + ], + "type": "text", + "content": "[58] Jeong Hun Yeo, Seunghee Han, Minsu Kim, and Yong Man Ro. Where visual speech meets language: Vsp-llm frame" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 748, + 315, + 757 + ], + "type": "text", + "content": "2855" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 351 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 76, + 72, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 294, + 95 + ], + "type": "text", + "content": "work for efficient and context-aware visual speech processing. arXiv preprint arXiv:2402.15151, 2024. 2, 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 150 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 150 + ], + "type": "text", + "content": "[59] Jeong Hun Yeo, Chae Won Kim, Hyunjun Kim, Hyeongseop Rha, Seunghee Han, Wen-Huang Cheng, and Yong Man Ro. Personalized lip reading: Adapting to your unique lip movements with vision and language. arXiv preprint arXiv:2409.00986, 2024. 
2, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 294, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 294, + 195 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 294, + 195 + ], + "type": "text", + "content": "[60] Xingxuan Zhang, Feng Cheng, and Shilin Wang. Spatiotemporal fusion based convolutional sequence learning for lip reading. In Proceedings of the IEEE/CVF International conference on Computer Vision, 2019. 3, 6" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 197, + 294, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 197, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 197, + 294, + 251 + ], + "type": "text", + "content": "[61] Yuanhang Zhang, Shuang Yang, Shiguang Shan, and Xilin Chen. Es3: Evolving self-supervised learning of robust audio-visual speech representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024. 6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 253, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 253, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 253, + 294, + 285 + ], + "type": "text", + "content": "[62] Ziheng Zhou, Guoying Zhao, Xiaopeng Hong, and Matti Pietikainen. A review of recent advances in visual speech decoding. Image and vision computing, 32(9), 2014. 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 286, + 294, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 294, + 351 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 294, + 351 + ], + "type": "text", + "content": "[63] Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. 
In Proceedings of the IEEE international conference on computer vision, 2015. 8" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "spans": [ + { + "bbox": [ + 295, + 749, + 315, + 757 + ], + "type": "text", + "content": "2856" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file