| { |
| "paper_id": "Q13-1003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:08:22.456641Z" |
| }, |
| "title": "Grounding Action Descriptions in Videos", |
| "authors": [ |
| { |
| "first": "Michaela", |
| "middle": [], |
| "last": "Regneri", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "settlement": "Saarbr\u00fccken", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Rohrbach", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Dominikus", |
| "middle": [], |
| "last": "Wetzel", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "settlement": "Saarbr\u00fccken", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Thater", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "settlement": "Saarbr\u00fccken", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Bernt", |
| "middle": [], |
| "last": "Schiele", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Pinkal", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Saarland University", |
| "location": { |
| "settlement": "Saarbr\u00fccken", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.", |
| "pdf_parse": { |
| "paper_id": "Q13-1003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The estimation of semantic similarity between words and phrases is a basic task in computational semantics. Vector-space models of meaning are one standard approach. Following the distributional hypothesis, frequencies of context words are recorded in vectors, and semantic similarity is computed as a proximity measure in the underlying vector space. Such distributional models are attractive because they are conceptually simple, easy to implement and relevant for various NLP tasks (Turney and Pantel, 2010) . At the same time, they provide a substantially incomplete picture of word meaning, since they ignore the relation between language and extralinguistic information, which is constitutive for linguistic meaning. In the last few years, a growing amount of work has been devoted to the task of grounding meaning in visual information, in particular by extending the distributional approach to jointly cover texts and images (Feng and Lapata, 2010; Bruni et al., 2011) . As a clear result, visual information improves the quality of distributional models. Bruni et al. (2011) show that visual information drawn from images is particularly relevant for concrete common nouns and adjectives.", |
| "cite_spans": [ |
| { |
| "start": 485, |
| "end": 510, |
| "text": "(Turney and Pantel, 2010)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 933, |
| "end": 956, |
| "text": "(Feng and Lapata, 2010;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 957, |
| "end": 976, |
| "text": "Bruni et al., 2011)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1064, |
| "end": 1083, |
| "text": "Bruni et al. (2011)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A natural next step is to integrate visual information from videos into a semantic model of event and action verbs. Psychological studies have shown the connection between action semantics and videos (Glenberg, 2002; Howell et al., 2005) , but to our knowledge, we are the first to provide a suitable data source and to implement such a model. The contribution of this paper is three-fold:", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 216, |
| "text": "(Glenberg, 2002;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 217, |
| "end": 237, |
| "text": "Howell et al., 2005)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We present a multimodal corpus containing textual descriptions aligned with high-quality videos. Starting from the video corpus of Rohrbach et al. (2012b) , which contains highresolution video recordings of basic cooking tasks, we collected multiple textual descriptions of each video via Mechanical Turk. We also provide an accurate sentence-level alignment of the descriptions with their respective videos. We expect the corpus to be a valuable resource for computational semantics, and moreover helpful for a variety of purposes, including video understanding and generation of text from videos.", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 156, |
| "text": "Rohrbach et al. (2012b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We provide a gold-standard dataset for the evaluation of similarity models for action verbs and phrases. The dataset has been designed as analogous to the Usage Similarity dataset of and contains pairs of naturallanguage action descriptions plus their associated video segments. Each of the pairs is annotated with a similarity score based on several manual annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We report an experiment on similarity modeling of action descriptions based on the video corpus and the gold standard annotation, which demonstrates the impact of scene information from videos. Visual similarity models outperform text-based models; the performance of combined models approaches the upper bound indicated by inter-annotator agreement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper is structured as follows: We first place ourselves in the landscape of related work (Sec. 2), then we introduce our corpus (Sec. 3). Sec. 4 reports our action similarity annotation experiment and Sec. 5 introduces the similarity measures we apply to the annotated data. We outline the results of our evaluation in Sec. 6, and conclude the paper with a summary and directions for future work (Sec. 7).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A large multimodal resource combining language and visual information resulted from the ESP game (von Ahn and Dabbish, 2004) . The dataset contains many images tagged with several one-word labels.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 124, |
| "text": "(von Ahn and Dabbish, 2004)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The Microsoft Video Description Corpus (Chen and Dolan, 2011, MSVD) is a resource providing textual descriptions of videos. It consists of multiple crowd-sourced textual descriptions of short video snippets. The MSVD corpus is much larger than our corpus, but most of the videos are of relatively low quality and therefore too challenging for state-ofthe-art video processing to extract relevant information. The videos are typically short and summarized with a single sentence. Our corpus contains coherent textual descriptions of longer video sequences, where each sentence is associated with a timeframe. Gupta et al. (2009) present another useful resource: their model learns the alignment of predicate-argument structures with videos and uses the result for action recognition in videos. However, the corpus contains no natural language texts.", |
| "cite_spans": [ |
| { |
| "start": 608, |
| "end": 627, |
| "text": "Gupta et al. (2009)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The connection between natural language sentences and videos has so far been mostly explored by the computer vision community, where different methods for improving action recognition by exploiting linguistic data have been proposed (Gupta and Mooney, 2010; Motwani and Mooney, 2012; Cour et al., 2008; Tzoukermann et al., 2011; Rohrbach et al., 2012b, among others) . Our resource is intended to be used for action recognition as well, but in this paper, we focus on the inverse effect of visual data on language processing. Feng and Lapata (2010) were the first to enrich topic models for newspaper articles with visual information, by incorporating features from article illustrations. They achieve better results when incorporating the visual information, providing an enriched model that pairs a single text with a picture. Bruni et al. (2011) used the ESP game data to create a visually grounded semantic model. Their results outperform purely text-based models using visual information from pictures for the task of modeling noun similarities. They model single words, and mostly visual features lead only to moderate improvements, which might be due to the mixed quality and random choice of the images. Dodge et al. (2012) recently investigated which words can actually be grounded in images at all, producing an automatic classifier for visual words.", |
| "cite_spans": [ |
| { |
| "start": 233, |
| "end": 257, |
| "text": "(Gupta and Mooney, 2010;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 258, |
| "end": 283, |
| "text": "Motwani and Mooney, 2012;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 284, |
| "end": 302, |
| "text": "Cour et al., 2008;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 303, |
| "end": 328, |
| "text": "Tzoukermann et al., 2011;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 329, |
| "end": 366, |
| "text": "Rohrbach et al., 2012b, among others)", |
| "ref_id": null |
| }, |
| { |
| "start": 526, |
| "end": 548, |
| "text": "Feng and Lapata (2010)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 829, |
| "end": 848, |
| "text": "Bruni et al. (2011)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1212, |
| "end": 1231, |
| "text": "Dodge et al. (2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "An interesting in-depth study by Mathe et al. (2008) automatically learnt the semantics of motion verbs as abstract features from videos. The study captures 4 actions with 8-10 videos for each of the actions, and would need a perfect object recognition from a visual classifier to scale up. Steyvers (2010) and later Silberer and Lapata (2012) present an alternative approach to incorporating visual information directly: they use so-called feature norms, which consist of human associations for many given words, as a proxy for general perceptual information. Because this model is trained and evaluated on those feature norms, it is not directly comparable to our approach.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 52, |
| "text": "Mathe et al. (2008)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 291, |
| "end": 306, |
| "text": "Steyvers (2010)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 317, |
| "end": 343, |
| "text": "Silberer and Lapata (2012)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The Restaurant Game by Orkin and Roy (2009) grounds written chat dialogues in actions carried out in a computer game. While this work is outstanding from the social learning perspective, the actions that ground the dialogues are clicks on a screen rather than real-world actions. The dataset has successfully been used to model determiner meaning (Reckman et al., 2011) in the context of the Restaurant Game, but it is unclear how this approach could scale up to content words and other domains.", |
| "cite_spans": [ |
| { |
| "start": 23, |
| "end": 43, |
| "text": "Orkin and Roy (2009)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 347, |
| "end": 369, |
| "text": "(Reckman et al., 2011)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We build our corpus on top of the \"MPII Cooking Composite Activities\" video corpus (Rohrbach et al., 2012b, MPII Composites) , which contains videos of different activities in the cooking domain, e.g., preparing carrots or separating eggs. We extend the existing corpus with multiple textual descriptions collected by crowd-sourcing via Amazon Mechanical Turk 1 (MTurk). To facilitate the alignment of sentences describing activities with their proper video segments, we also obtained approximate timestamps, as described in Sec. 3.2.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 124, |
| "text": "(Rohrbach et al., 2012b, MPII Composites)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "MPII Composites comes with timed goldstandard annotation of low-level activities and participating objects (e.g. OPEN [HAND,DRAWER] or TAKE OUT [HAND,KNIFE,DRAWER] ). By adding textual descriptions (e.g., The person takes a knife from the drawer) and aligning them on the sentence level with videos and low-level annotations, we provide a rich multimodal resource (cf. Fig. 2 ), the \"Saarbr\u00fccken Corpus of Textually Annotated Cooking Scenes\" (TACOS). In particular, the TACOS corpus provides:", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 131, |
| "text": "[HAND,DRAWER]", |
| "ref_id": null |
| }, |
| { |
| "start": 144, |
| "end": 163, |
| "text": "[HAND,KNIFE,DRAWER]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 369, |
| "end": 375, |
| "text": "Fig. 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 A collection of coherent textual descriptions for video recordings of activities of medium complexity, as a basis for empirical discourse-related research, e.g., the selection and granularity of action descriptions in context", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 A high-quality alignment of sentences with video segments, supporting the grounding of action descriptions in visual information", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 Collections of paraphrases describing the same scene, which result as a by-product from the text-video alignment and can be useful for text generation from videos (among other things)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u2022 The alignment of textual activity descriptions with sequences of low-level activities, which may be used to study the decomposition of action verbs into basic activity predicates", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1 mturk.com", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We expect that our corpus will encourage and enable future work on various topics in natural language and video processing. In this paper, we will make use of the second aspect only, demonstrating the usefulness of the corpus for the grounding task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "After a more detailed description of the basic video corpus and its annotation (Sec. 3.1) we describe the collection of textual descriptions with MTurk (Sec. 3.2), and finally show the assembly and some benchmarks of the final corpus (Sec. 3.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The TACOS Corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "MPII Composites contains 212 high resolution video recordings of 1-23 minutes length (4.5 min. on average). 41 basic cooking tasks such as cutting a cucumber were recorded, each between 4 and 8 times. The selection of cooking tasks is based on those proposed at \"Jamie's Home Cooking Skills\". 2 The corpus is recorded in a kitchen environment with a total of 22 subjects. Each video depicts a single task executed by an individual subject.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The video corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The dataset contains expert annotations of lowlevel activity tags. Annotations are provided for segments containing a semantically meaningful cooking related movement pattern. The action must go beyond single body part movements (such as move arm up) and must have the goal of changing the state or location of an object. 60 different activity labels are used for annotation (e.g. PEEL, STIR, TRASH). Each low-level activity tag consists of an activity label (PEEL), a set of associated objects (CARROT, DRAWER,...), and the associated timeframe (starting and ending points of the activity). Associated objects are the participants of an activity, namely tools (e.g. KNIFE), patient (CARROT) and location (CUTTING-BOARD). We provide the coarse-grained role information for patient, location and tool in the corpus data, but we did not use this information in our experiments. The dataset contains a total of 8818 annotated segments, on average 42 per video.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The video corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We collected textual descriptions for a subset of the videos in MPII Composites, restricting collection to tasks that involve manipulation of cooking ingredients. We also excluded tasks with fewer than four video recordings in the corpus, leaving 26 tasks to be described. We randomly selected five videos from each task, except the three tasks for which only four videos are available. This resulted in a total of 127 videos. For each video, we collected 20 different textual descriptions, leading to 2540 annotation assignments. We published these assignments (HITs) on MTurk, using an adapted version 3 of the annotation tool Vatic (Vondrick et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 635, |
| "end": 658, |
| "text": "(Vondrick et al., 2012)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting textual video descriptions", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In each assignment, the subject saw one video specified with the task title (e.g. How to prepare an onion), and then was asked to enter at least five and at most 15 complete English sentences to describe the events in the video. The annotation instructions contained example annotations from a kitchen task not contained in our actual dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting textual video descriptions", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Annotators were encouraged to watch each video several times, skipping backward and forward as they wished. They were also asked to take notes while watching, and to sketch the annotation before entering it. Once familiarized with the video, subjects did the final annotation by watching the entire video from beginning to end, without the possibility of further non-sequential viewing. Subjects were asked to enter each sentence as soon as the action described by the sentence was completed. The video playback paused automatically at the beginning of the sentence input. We recorded pause onset for each sentence annotation as an approximate ending timestamp of the described action. The annotators resumed the video manually.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting textual video descriptions", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The tasks required a HIT approval rate of 75% and were open only to workers in the US, in order to increase the general language quality of the English annotations. Each task paid 1.20 USD. Before paying we randomly inspected the annotations and manually checked for quality. The total costs of collecting the annotations amounted to 3,353 USD. The data was obtained within a time frame of 3.5 weeks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Collecting textual video descriptions", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our corpus is a combination of the MTurk data and MPII Composites, created by filtering out inappropriate material and computing a high-quality alignment of sentences and video segments. The alignment is done by matching the approximate timestamps of the MTurk data to the accurate timestamps in MPII Composites. 3 github.com/marcovzla/vatic/tree/bolt", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We discarded text instances if people did not time the sentences properly, taking the association of several (or even all) sentences to a single timestamp as an indicator. Whenever we found a timestamp associated with two or more sentences, we discarded the whole instance. Overall, we had to filter out 13% of the text instances, which left us with 2206 textual video descriptions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For the alignment of sentence annotations and video segments, we assign a precise timeframe to each sentence in the following way: We take the timeframes given by the low-level annotation in MPII Composites as a gold standard micro-event segmentation of the video, because they mark all distinct frames that contain activities of interest. We call them elementary frames. The sequence of elementary frames is not necessarily continuous, because idle time is not annotated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The MTurk sentences have end points that constitute a coarse-grained, noisy video segmentation, assuming that each sentence spans the time between the end of the previous sentence and its own ending point. We refine those noisy timeframes to gold frames as shown in Fig. 1 : Each elementary frame (l1-l5) is mapped to a sentence (s1-s3) if its noisy timeframe covers at least half of the elementary frame. We define the final gold sentence frame then as the timespan between the starting point of the first and the ending point of the last elementary frame.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 266, |
| "end": 272, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The alignment of descriptions with low-level activities results in a table as given in Fig. 3 . Columns contain the textual descriptions of the videos; rows Top 10 Verbs cut, take, get, put, wash, place, rinse, remove, *pan, peel", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 87, |
| "end": 93, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Top 10 Activities move, take out, cut, wash, take apart, add, shake, screw, put in, peel Figure 4 : 10 most frequent verbs and low-level actions in the TACOS corpus. pan is probably often mis-tagged.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 89, |
| "end": 97, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "correspond to low-level actions, and each sentence is aligned with the last of its associated low-level actions. As a side effect, we also obtain multiple paraphrases for each sentence, by considering all sentences with the same associated time frame as equivalent realizations of the same action. The corpus contains 17,334 action descriptions (tokens), realizing 11,796 different sentences (types). It consists of 146,771 words (tokens), 75,210 of which are content word instances (i.e. nouns, verbs and adjectives). The verb vocabulary comprises 28,292 verb tokens, realizing 435 lemmas. Since verbs occurring in the corpus typically describe actions, we can note that the linguistic variance for the 58 different low-level activities is quite large. Fig. 4 gives an impression of the action realizations in the corpus, listing the most frequent verbs from the textual data, and the most frequent low-level activities.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 754, |
| "end": 760, |
| "text": "Fig. 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "On average, each description covers 2.7 low-level activities, which indicates a clear difference in granularity. 38% of the descriptions correspond to exactly one low-level activity, about a quarter (23%) covers two of them; 16% have 5 or more low-level elements, 2% more than 10. The corpus shows how humans vary the granularity of their descriptions, measured in time or number of low-level activities, and it shows how they vary the linguistic realization of the same action. For example, Fig. 3 contains dice and chop into small pieces as alternative realizations of the low-level activity sequence SLICE -SCRATCH OFF -SLICE.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 492, |
| "end": 498, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The descriptions are of varying length (9 words on average), reaching from two-word phrases to detailed descriptions of 65 words. Most sentences are short, consisting of a reference to the person in the video, a participant and an action verb (The person rinses the carrot, He cuts off the two edges). People often specified an instrument (from the faucet), or the resulting state of the action (chop the carrots in small pieces). Occasionally, we find more complex constructions (support verbs, coordinations).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As Fig. 3 indicates, the timestamp-based alignment is pretty accurate; occasional errors occur like He starts chopping the carrot... in NL Sequence 3. The data contains some typos and ungrammatical sentences (He washed carrot), but for our own experiments, the small number of such errors did not lead to any processing problems.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 9, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Putting the TACOS corpus together", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we present a gold standard dataset, as a basis for the evaluation of visually grounded models of action similarity. We call it the \"Action Similarity Dataset\" (ASim) in analogy to the Usage Similarity dataset (USim) of and Erk et al. (2012) . Similarly to USim, ASim contains a collection of sentence pairs with numerical similarity scores assigned by human annotators. We asked the annotators to focus on the similarity of the activities described rather than on assessing semantic similarity in general. We use sentences from the TACOS corpus and record their timestamps. Thus each sentence comes with the video segment which it describes (these were not shown to the annotators).", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 257, |
| "text": "Erk et al. (2012)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Action Similarity Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Random selection of annotated sentences from the corpus would lead to a large majority of pairs which are completely dissimilar, or difficult to grade (e.g., He opens the drawer -The person cuts off the ends of the carrot). We constrained the selection process in two ways: First, we consider only sentences describing activities of manipulating an ingredient. The low-level annotation of the video corpus helps us identify candidate descriptions. We exclude rare and special activities, ending up with CUT, SLICE, CHOP, PEEL, TAKE APART, and WASH, which occur reasonably frequently, with a wide distribution over different scenarios. We restrict the candidate set to those sentences whose timespan includes one of these activities. This results in a conceptually more focussed repertoire of descriptions, and at the same time admits full linguistic variation (wash an apple under the faucet -rinse an apple, slice the cucumber -cut the cucumber into slices). Second, we required the pairs to share some lexical material, either the head verb or the manipulated ingredient (or both). 4 More precisely, we composed the ASim dataset from three different subsets:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selecting action description pairs", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Different activity, same object: This subset contains pairs describing different types of actions carried out on the same type of object (e.g. The man washes the carrot. -She dices the carrot.). Its focus is on the central task of modeling the semantic relation between actions (rather than the objects involved in the activity), since the object head nouns in the descriptions are the same, and the respective video segments show the same type of object.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selecting action description pairs", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Same activity, same object: Description pairs of this subset will in many cases, but not always, agree in their head verbs. The dataset is useful for exploring the degree to which action descriptions are underspecified with respect to the precise manner of their practical realization. For example, peeling an onion will mostly be done in a rather uniform way, while cut applied to carrot can mean that the carrot is chopped up, or sliced, or cut in halves.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selecting action description pairs", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Same activity & verb, different object: Description pairs in this subset share head verb and lowlevel activity, but have different objects (e.g. The man washes the carrot. -A girl washes an apple under the faucet.). This dataset enables the exploration of the objects' meaning contribution to the complete action, established by the variation of equivalent actions that are done to different objects.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selecting action description pairs", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We assembled 900 action description pairs for annotation: 480 pairs share the object; 240 of which have different activities, and the other 240 pairs share the same activity. We included paraphrases describing the same video segment, but we excluded pairs of identical sentences. 420 additional pairs share their head verb, but have different objects.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selecting action description pairs", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Three native speakers of English were asked to judge the similarity of the action pairs with respect to how they are carried out, rating each sentence pair with a score from 1 (not similar at all) to 5 (the same or nearly the same). They did not see the respective videos, but we noted the relevant kitchen task (i.e. which vegetable was prepared). We asked the annotators explicitly to ignore the actor of the action (e.g. whether it is a man or a woman) and score the similarities of the underlying actions rather than their verbalizations. Each subject rated all 900 pairs, which were shown to them in completely random order, with a different order for each subject. We compute inter-annotator agreement (and the forthcoming evaluation scores) using Spearman's rank correlation coefficient (\u03c1), a non-parametric test which is widely used for similar evaluation tasks (Mitchell and Lapata, 2008; Bruni et al., 2011; . Spearman's \u03c1 evaluates how the samples are ranked relative to each other rather than the numerical distance between the rankings. Fig. 5 shows the average similarity ratings in the different settings and the inter-annotator agreement. The average inter-rater agreement was \u03c1 = 0.73 (averaged over pairwise rater agreements), with pairwise results of \u03c1 = 0.77, 0.72, and 0.69, respectively, which are all highly significant at p < 0.001.", |
| "cite_spans": [ |
| { |
| "start": 871, |
| "end": 898, |
| "text": "(Mitchell and Lapata, 2008;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 899, |
| "end": 918, |
| "text": "Bruni et al., 2011;", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1051, |
| "end": 1057, |
| "text": "Fig. 5", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Manual annotation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As expected, pairs with the same activity and object are rated very similar (4.19) on average, while the similarity of different activities on the same object is the lowest (2.2). For both subsets, inter-rater agreement is high (\u03c1 = 0.73), and even higher for both SAME OBJECT subsets together (0.84).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual annotation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Pairs with identical head verbs and different objects have a small standard deviation, at 0.69. The inter-annotator agreement on this set is much lower than for pairs from the SAME OBJECT set. This indicates that similarity assessment for different variants of the same activity is a hard task even for humans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual annotation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the following, we demonstrate that visual information contained in videos of the kind provided by the TACOS corpus (Sec. 3) substantially contributes to the semantic modeling of action-denoting expressions. In Sec. 6, we evaluate several methods for predicting action similarity on the task provided by the ASim dataset. In this section, we describe the models considered in the evaluation. We use two different models based on visual information, and in addition two text based models. We will also explore the effect of combining linguistic and visual information and investigate which mode is most suitable for which kinds of similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models of Action Similarity", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We use two different models of textual similarity to predict action similarity: a simple word-overlap measure (Jaccard coefficient) and a state-of-the-art model based on \"contextualized\" vector representations of word meaning (Thater et al., 2011) .", |
| "cite_spans": [ |
| { |
| "start": 226, |
| "end": 247, |
| "text": "(Thater et al., 2011)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text-based models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Jaccard coefficient. The Jaccard coefficient gives the ratio between the number of (distinct) words common to two input sentences and the total number of (distinct) words in the two sentences. Such simple surface-oriented measures of textual similarity are often used as baselines in related tasks such as recognizing textual entailment (Dagan et al., 2005) and are known to deliver relatively strong results.", |
| "cite_spans": [ |
| { |
| "start": 337, |
| "end": 357, |
| "text": "(Dagan et al., 2005)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text-based models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Vector model. We use the vector model of Thater et al. (2011) , which \"contextualizes\" vector representations for individual words based on the particular sentence context in which the target word occurs. The basic intuition behind this approach is that the words in the syntactic context of the target word in a given input sentence can be used to refine or disambiguate its vector. Intuitively, this allows us to discriminate between different actions that a verb can refer to, based on the different objects of the action.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 61, |
| "text": "Thater et al. (2011)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text-based models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We first experimented with a version of this vector model which predicts action similarity scores of two input sentences by computing the cosine similarity of the contextualized vectors of the verbs in the two sentences only. We achieved better performance with a variant of this model which computes vectors for the two sentences by summing over the contextualized vectors of all constituent content words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text-based models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In the experiments reported below, we only use the second variant. We use the same experimental setup as Thater et al. (2011) , as well as the parameter settings that are reported to work best in that paper.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 125, |
| "text": "Thater et al. (2011)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text-based models", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We distinguish two approaches to compute the similarity between two video segments. In the first, unsupervised approach we extract a video descriptor and compute similarities between these raw features (Wang et al., 2011) . The second approach builds upon the first by additionally learning higher level attribute classifiers (Rohrbach et al., 2012b ) on a held out training set. The similarity between two segments is then computed between the classifier responses. In the following we detail both approaches:", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 221, |
| "text": "(Wang et al., 2011)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 326, |
| "end": 349, |
| "text": "(Rohrbach et al., 2012b", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Video-based models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Raw visual features. We use the state-of-the-art video descriptor Dense Trajectories (Wang et al., 2011) which extracts visual video features, namely histograms of oriented gradients, flow, and motion boundary histograms, around densely sampled and tracked points.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 104, |
| "text": "(Wang et al., 2011)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Video-based models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "This approach is especially suited for this data as it ignores non-moving parts in the video: we are interested in activities and manipulation of objects, and this type of feature implicitly uses only information in relevant image locations. For our setting this feature representation has been shown to be superior to human pose-based approaches (Rohrbach et al., 2012a) . Using a bag-of-words representation we encode the features using a 16,000 dimensional codebook. Features and codebook are provided with the publicly available video dataset.", |
| "cite_spans": [ |
| { |
| "start": 347, |
| "end": 371, |
| "text": "(Rohrbach et al., 2012a)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Video-based models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We compute the similarity between two encoded features by computing the intersection of the two (normalized) histograms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Video-based models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Visual classifiers. Visual raw features tend to have several dimensions in the feature space which provide unreliable, noisy values and thus degrade the strength of the similarity measure. Intermediate level attribute classifiers can learn which feature dimensions are distinctive and thus significantly improve performance over raw features. Rohrbach et al. (2012b) showed that using such an attribute classifier representation can significantly improve per- formance for composite activity recognition. The relevant attributes are all activities and objects annotated in the video data (cf. Section 3.1). For the experiments reported below we use the same setup as Rohrbach et al. (2012b) and use all videos in MPII Composites and MPII Cooking (Rohrbach et al., 2012a) , excluding the 127 videos used during evaluation. The real-valued SVM-classifier output provides a confidence how likely a certain attribute appeared in a given video segment. This results in a 218-dimensional vector of classifier outputs for each video segment. To compute the similarity between two vectors we compute the cosine between them.", |
| "cite_spans": [ |
| { |
| "start": 343, |
| "end": 366, |
| "text": "Rohrbach et al. (2012b)", |
| "ref_id": null |
| }, |
| { |
| "start": 667, |
| "end": 690, |
| "text": "Rohrbach et al. (2012b)", |
| "ref_id": null |
| }, |
| { |
| "start": 746, |
| "end": 770, |
| "text": "(Rohrbach et al., 2012a)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Video-based models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We evaluate the different similarity models introduced in Sec. 5 by calculating their correlation with the gold-standard similarity annotations of ASim (cf. Sec. 4). For all correlations, we use Spearman's \u03c1 as a measure. We consider the two textual measures (JACCARD and TEXTUAL VECTORS) and their combination, as well as the two visual models (VISUAL RAW VECTORS and VISUAL CLAS-SIFIER) and their combination. We also combined textual and visual features, in two variants: The first includes all models (ALL COMBINED), the second only the unsupervised components, omitting the visual classifier (ALL UNSUPERVISED). To combine multiple similarity measures, we simply average their normalized scores (using z-scores). Figure 6 shows the scores for all of these measures on the complete ASim dataset (OVERALL), along with the two subparts, where description pairs share either the object (SAME OBJECT) or the head verb (SAME VERB). In addition to the model results, the table also shows the average human interannotator agreement as UPPER BOUND.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 718, |
| "end": 726, |
| "text": "Figure 6", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "On the complete set, both visual and textual measures have a highly significant correlation with the gold standard, whereas the combination of both clearly leads to the best performance (0.55). The results on the SAME OBJECT and SAME VERB subsets shed light on the division of labor between the two information sources. While the textual measures show a comparable performance over the two subsets, there is a dramatic difference in the contribution of visual information: On the SAME OBJECT set, the visual models clearly outperform the textual ones, whereas the visual information has no positive effect on the SAME VERB set. This is clear evidence that the visual model does not capture the similarity of the participating objects but rather genuine action similarity, which the visual features (Wang et al., 2011) we employ were designed for. A direction for future work is to learn dedicated visual object detectors to recognize and capture similarities between objects more precisely.", |
| "cite_spans": [ |
| { |
| "start": 798, |
| "end": 817, |
| "text": "(Wang et al., 2011)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The numbers shown in Figure 7 support this hypothesis, showing the two groups in the SAME OB-JECT class: For sentence pairs that share the same activity, the textual models seem to be much more suitable than the visual ones. In general, visual models perform better on actions with different activity types, textual models on closely related activities. Overall, the supervised classifier contributes a good part to the final results. However, the supervision is not strictly necessary to arrive at a significant correlation; the raw visual features alone are sufficient for the main performance gain seen with the integration of visual information.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 21, |
| "end": 29, |
| "text": "Figure 7", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We presented the TACOS corpus, which provides coherent textual descriptions for high-quality video recordings, plus accurate alignments of text and video on the sentence level. We expect the corpus to be beneficial for a variety of research activities in natural-language and visual processing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this paper, we focused on the task of grounding the meaning of action verbs and phrases. We designed the ASim dataset as a gold standard and evaluated several text-and video-based semantic similarity models on the dataset, both individually and in different combinations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We are the first to provide semantic models for action-describing expressions, which are based on information extracted from videos. Our experimental results show that these models are of considerable quality, and that predictions based on a combination of visual and textual information even approach the upper bound given by the agreement of human annotators.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this work we used existing similarity models that had been developed for different applications. We applied these models without any special training or optimization for the current task, and we combined them in the most straightforward way. There is room for improvement by tuning the models to the task, or by using more sophisticated approaches to combine modality-specific information (Silberer and Lapata, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 392, |
| "end": 419, |
| "text": "(Silberer and Lapata, 2012)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We built our work on an existing corpus of highquality video material, which is restricted to the cooking domain. As a consequence, the corpus covers only a limited inventory of activity types and action verbs. Note, however, that our models are fully unsupervised (except the Visual Classifier model), and thus can be applied without modification to arbitrary domains and action verbs, given that they are about observable activities. Also, corpora containing information comparable to the TACOS corpus but with wider coverage (and perhaps a bit noisier) can be obtained with a moderate amount of effort. One needs videos of reasonable quality and some sort of alignment with action descriptions. In some cases such alignments even come for free, e.g. via subtitles, or descriptions of short video clips that depict just a single action.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "For future work, we will further investigate the compositionality of action-describing phrases. We also want to leverage the multimodal information provided by the TACOS corpus for the improvement of high-level video understanding, as well as for generation of natural-language text from videos.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The TACOS corpus and all other data described in this paper (videos, low-level annotation, aligned textual descriptions, the ASim-Dataset and visual features) are publicly available. 5 ", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 184, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "www.jamieshomecookingskills.com", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We refer to the latter with the term object; we don't require the ingredient term to be the actual grammatical object in the action descriptions, we rather use \"object\" in its semantic role sense as the entity affected by an action.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.coli.uni-saarland.de/ projects/smile/page.php?id=tacos", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We'd like to thank Asad Sayeed, Alexis Palmer and Prashant Rao for their help with the annotations. We're indebted to Carl Vondrick and Marco Antonio Valenzuela Escrcega for their extensive support with the video annotation tool. Further we thank Alexis Palmer and in particular three anonymous reviewers for their helpful comments on this paper. -This work was funded by the Cluster of Excellence \"Multimodal Computing and Interaction\" of the German Excellence Initiative and the DFG project SCHI989/2-2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Labeling images with a computer game", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Luis Von Ahn", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dabbish", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of SIGCHI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luis von Ahn and Laura Dabbish. 2004. Labeling images with a computer game. In Proceedings of SIGCHI 2004.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Distributional semantics from text and images", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Giang Binh Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of GEMS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Giang Binh Tran, and Marco Baroni. 2011. Distributional semantics from text and images. In Pro- ceedings of GEMS 2011.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Collecting highly parallel data for paraphrase evaluation", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "B" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David L. Chen and William B. Dolan. 2011. Collect- ing highly parallel data for paraphrase evaluation. In Proceedings of ACL 2011.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Movie/script: Alignment and parsing of video and text transcription", |
| "authors": [ |
| { |
| "first": "Timothee", |
| "middle": [], |
| "last": "Cour", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computer Vision -ECCV", |
| "volume": "5305", |
| "issue": "", |
| "pages": "158--171", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothee Cour, Chris Jordan, Eleni Miltsakaki, and Ben Taskar. 2008. Movie/script: Alignment and parsing of video and text transcription. In Computer Vision -ECCV 2008, volume 5305 of Lecture Notes in Com- puter Science, pages 158-171. Springer Berlin Heidel- berg.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The PASCAL recognising textual entailment challenge", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Ido Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernardo", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Magnini", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of MLCW 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ido Dagan, Oren Glickman, and Bernardo Magnini. 2005. The PASCAL recognising textual entailment challenge. In Proceedings of MLCW 2005.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Detecting visual text", |
| "authors": [ |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Amit", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Xufeng", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Alyssa", |
| "middle": [], |
| "last": "Mensch", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Stratos", |
| "suffix": "" |
| }, |
| { |
| "first": "Kota", |
| "middle": [], |
| "last": "Yamaguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "C" |
| ], |
| "last": "Berg", |
| "suffix": "" |
| }, |
| { |
| "first": "Tamara", |
| "middle": [ |
| "L" |
| ], |
| "last": "Berg", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "762--772", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jesse Dodge, Amit Goyal, Xufeng Han, Alyssa Men- sch, Margaret Mitchell, Karl Stratos, Kota Yamaguchi, Yejin Choi, Hal Daum\u00e9 III, Alexander C. Berg, and Tamara L. Berg. 2012. Detecting visual text. In HLT- NAACL, pages 762-772.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Graded word sense assignment", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Erk", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Erk and Diana McCarthy. 2009. Graded word sense assignment. In Proceedings of EMNLP 2009.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Investigations on word senses and word usages", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Erk", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Gaylord", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of ACL/AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Erk, Diana McCarthy, and Nicholas Gaylord. 2009. Investigations on word senses and word usages. In Proceedings of ACL/AFNLP 2009.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Measuring word meaning in context", |
| "authors": [ |
| { |
| "first": "Katrin", |
| "middle": [], |
| "last": "Erk", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Gaylord", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Katrin Erk, Diana McCarthy, and Nick Gaylord. 2012. Measuring word meaning in context. CL.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Visual information in semantic representation", |
| "authors": [ |
| { |
| "first": "Yansong", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yansong Feng and Mirella Lapata. 2010. Visual infor- mation in semantic representation. In Proceedings of HLT-NAACL 2010.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Grounding language in action", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "M" |
| ], |
| "last": "Glenberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Psychonomic Bulletin & Review", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. M. Glenberg. 2002. Grounding language in action. Psychonomic Bulletin & Review.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Using closed captions as supervision for video activity recognition", |
| "authors": [ |
| { |
| "first": "Sonal", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Raymond", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Twenty-Fourth AAAI Conference on Artificial Intelligence (AAAI-2010)", |
| "volume": "", |
| "issue": "", |
| "pages": "1083--1088", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sonal Gupta and Raymond J. Mooney. 2010. Us- ing closed captions as supervision for video activ- ity recognition. In Proceedings of the Twenty-Fourth AAAI Conference on Artificial Intelligence (AAAI- 2010), pages 1083-1088, Atlanta, GA, July.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Understanding videos, constructing plots learning a visually grounded storyline model from annotated videos", |
| "authors": [ |
| { |
| "first": "Abhinav", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Praveen", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianbo", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Larry", |
| "middle": [ |
| "S" |
| ], |
| "last": "Davis", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhinav Gupta, Praveen Srinivasan, Jianbo Shi, and Larry S. Davis. 2009. Understanding videos, con- structing plots learning a visually grounded storyline model from annotated videos. In Proceedings of CVPR 2009.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A model of grounded language acquisition: Sensorimotor features improve lexical and grammatical learning", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [ |
| "R" |
| ], |
| "last": "Howell", |
| "suffix": "" |
| }, |
| { |
| "first": "Damian", |
| "middle": [], |
| "last": "Jankowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzanna", |
| "middle": [], |
| "last": "Becker", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve R. Howell, Damian Jankowicz, and Suzanna Becker. 2005. A model of grounded language ac- quisition: Sensorimotor features improve lexical and grammatical learning. JML.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Learning the abstract motion semantics of verbs from captioned videos", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mathe", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Fazly", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Dickinson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Mathe, A. Fazly, S. Dickinson, and S. Stevenson. 2008. Learning the abstract motion semantics of verbs from captioned videos. pages 1-8.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Vector-based models of semantic composition", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Mitchell and Mirella Lapata. 2008. Vector-based models of semantic composition. In Proceedings of ACL 2008.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Improving video activity recognition using object recognition and text mining", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Tanvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Motwani", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 20th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tanvi S. Motwani and Raymond J. Mooney. 2012. Im- proving video activity recognition using object recog- nition and text mining. In Proceedings of the 20th", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "European Conference on Artificial Intelligence (ECAI-2012)", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "600--605", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "European Conference on Artificial Intelligence (ECAI- 2012), pages 600-605, August.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Automatic learning and generation of social behavior from collective human gameplay", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Orkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Deb", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of AAMAS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Orkin and Deb Roy. 2009. Automatic learning and generation of social behavior from collective human gameplay. In Proceedings of AAMAS 2009.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Extracting aspects of determiner meaning from dialogue in a virtual world environment", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Hilke Reckman", |
| "suffix": "" |
| }, |
| { |
| "first": "Deb", |
| "middle": [], |
| "last": "Orkin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of CCS 2011, IWCS '11", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hilke Reckman, Jeff Orkin, and Deb Roy. 2011. Ex- tracting aspects of determiner meaning from dialogue in a virtual world environment. In Proceedings of CCS 2011, IWCS '11.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A database for fine grained activity detection of cooking activities", |
| "authors": [ |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Rohrbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Sikandar", |
| "middle": [], |
| "last": "Amin", |
| "suffix": "" |
| }, |
| { |
| "first": "Mykhaylo", |
| "middle": [], |
| "last": "Andriluka", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernt", |
| "middle": [], |
| "last": "Schiele", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcus Rohrbach, Sikandar Amin, Mykhaylo Andriluka, and Bernt Schiele. 2012a. A database for fine grained activity detection of cooking activities. In Proceedings of CVPR 2012.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
"title": "Script data for attribute-based recognition of composite activities",
"authors": [
{
"first": "Marcus",
"middle": [],
"last": "Rohrbach",
"suffix": ""
},
{
"first": "Michaela",
"middle": [],
"last": "Regneri",
"suffix": ""
},
{
"first": "Micha",
"middle": [],
"last": "Andriluka",
"suffix": ""
},
{
"first": "Sikandar",
"middle": [],
"last": "Amin",
"suffix": ""
},
{
"first": "Manfred",
"middle": [],
"last": "Pinkal",
"suffix": ""
},
{
"first": "Bernt",
"middle": [],
"last": "Schiele",
"suffix": ""
}
],
| "year": 2012, |
| "venue": "Proceedings of ECCV", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcus Rohrbach, Michaela Regneri, Micha Andriluka, Sikandar Amin, Manfred Pinkal, and Bernt Schiele. 2012b. Script data for attribute-based recognition of composite activities. In Proceedings of ECCV 2012.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Grounded models of semantic representation", |
| "authors": [ |
| { |
| "first": "Carina", |
| "middle": [], |
| "last": "Silberer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EMNLP-CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carina Silberer and Mirella Lapata. 2012. Grounded models of semantic representation. In Proceedings of EMNLP-CoNLL 2012.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Combining feature norms and text data with topic models", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Acta Psychologica", |
| "volume": "133", |
| "issue": "3", |
| "pages": "234--243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Mark Steyvers. 2010. Combining feature norms and text data with topic models. Acta Psychologica, 133(3):234 -243. Formal modeling of semantic concepts.",
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Word meaning in context: A simple and effective vector model", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Thater", |
| "suffix": "" |
| }, |
| { |
| "first": "Hagen", |
| "middle": [], |
| "last": "F\u00fcrstenau", |
| "suffix": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Pinkal", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Thater, Hagen F\u00fcrstenau, and Manfred Pinkal. 2011. Word meaning in context: A simple and effec- tive vector model. In Proceedings of IJCNLP 2011.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "From frequency to meaning. vector space models for semantics", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pantel", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter D. Turney and Patrick Pantel. 2010. From fre- quency to meaning. vector space models for semantics. JAIR.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Language models for semantic extraction and filtering in video action recognition", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Tzoukermann", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kosecka", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Fermuller", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Perera", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Ferraro", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Sapp", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Chaudhry", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "AAAI Workshop on Language-Action Tools for Cognitive Artificial Agents", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Tzoukermann, J. Neumann, J. Kosecka, C. Fermuller, I. Perera, F. Ferraro, B. Sapp, R. Chaudhry, and G. Singh. 2011. Language models for semantic ex- traction and filtering in video action recognition. In AAAI Workshop on Language-Action Tools for Cogni- tive Artificial Agents.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Efficiently scaling up crowdsourced video annotation", |
| "authors": [ |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Vondrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Donald", |
| "middle": [], |
| "last": "Patterson", |
| "suffix": "" |
| }, |
| { |
| "first": "Deva", |
| "middle": [], |
| "last": "Ramanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carl Vondrick, Donald Patterson, and Deva Ramanan. 2012. Efficiently scaling up crowdsourced video an- notation. IJCV.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Action Recognition by Dense Trajectories", |
| "authors": [ |
| { |
| "first": "Heng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Kl\u00e4ser", |
| "suffix": "" |
| }, |
| { |
| "first": "Cordelia", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheng-Lin", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heng Wang, Alexander Kl\u00e4ser, Cordelia Schmid, and Cheng-Lin Liu. 2011. Action Recognition by Dense Trajectories. In Proceedings of CVPR 2011.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "Aligning action descriptions with the video.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "Excerpt from the corpus for a video on PREPARING A CARROT. Example frames, low-level annotation (Action and Participants) is shown along with three of the MTurk sequences (NL Sequence 1-3).", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "text": "Average similarity ratings (Sim), their standard deviation (\u03c3)) and annotator agreement (\u03c1) for ASim.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "text": "Evaluation results in Spearman's \u03c1. All values > 0.11 are significant at p < 0.001.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "text": "Results for sentences with the same object, with either the same or different low-level activity.", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td>896 -1137 1145 -1212 1330 -1388 1431 -1647 1647 -1669 1673 -1705 1736 -1818 1919 -3395</td><td colspan=\"3\">wash shake close move move cut slice take out [hand,knife,drawer] [hand,carrot] [hand,carrot] [knife,carrot,cutting board] [knife,carrot,cutting board] [hand,carrot,bowl,cutting board] [hand,cutting board,counter] [hand,drawer] manual low-level annotation</td><td colspan=\"2\">Natural language descriptions Mechanical Turk data collection > 890: Videos of basic kitchen tasks</td></tr><tr><td colspan=\"4\">Low level annotations with timestamps, actions and objects</td><td colspan=\"2\">timestamp-based alignment</td><td>with ending times of the actions</td></tr><tr><td/><td/><td colspan=\"4\">Figure 2: Corpus Overview</td></tr><tr><td colspan=\"5\">Sample frame Start End Action Participants NL Sequence 1</td><td>NL Sequence 2</td><td>NL Sequence 3</td></tr><tr><td/><td>743 911 wash</td><td>hand, carrot</td><td colspan=\"3\">He washed carrot The person rinses the</td><td>He rinses the carrot from</td></tr><tr><td/><td/><td/><td/><td/><td>carrot.</td><td>the faucet.</td></tr><tr><td/><td>982 1090 cut</td><td>knife, carrot,</td><td colspan=\"2\">He cut off ends of</td><td>The person cuts off</td><td>He cuts off the two edges.</td></tr><tr><td/><td/><td>cutting board</td><td>carrots</td><td/><td>the ends of the carrot.</td></tr><tr><td/><td>1164 1257 open</td><td>hand, drawer</td><td/><td/></tr><tr><td/><td colspan=\"2\">1679 1718 close hand, drawer</td><td/><td/><td>He searches for some-</td></tr><tr><td/><td/><td/><td/><td/><td>thing in the drawer, failed</td></tr><tr><td/><td/><td/><td/><td/><td>attempt, he throws away</td></tr><tr><td/><td/><td/><td/><td/><td>the edges in trash.</td></tr><tr><td/><td>1746 1799 trash</td><td>hand, carrot</td><td/><td/><td>The person searches</td></tr><tr><td/><td/><td/><td/><td/><td>for the trash can, then</td></tr><tr><td/><td/><td/><td/><td/><td>throws the ends 
of</td></tr><tr><td/><td/><td/><td/><td/><td>the carrot away.</td></tr><tr><td/><td>1854 2011 wash</td><td>hand, carrot</td><td/><td/><td>He rinses the carrot again.</td></tr><tr><td/><td colspan=\"2\">2011 2045 shake hand, carrot</td><td colspan=\"3\">He washed carrot The person rinses the</td><td>He starts chopping the</td></tr><tr><td/><td/><td/><td/><td/><td>carrot again.</td><td>carrot in small pieces.</td></tr><tr><td/><td>2083 2924 slice</td><td>knife, carrot,</td><td/><td/></tr><tr><td/><td/><td>cutting board</td><td/><td/></tr><tr><td/><td>2924 2959 scratch</td><td>hand, carrot,</td><td/><td/></tr><tr><td/><td>off</td><td>knife, cutting</td><td/><td/></tr><tr><td/><td/><td>board</td><td/><td/></tr><tr><td/><td>3000 3696 slice</td><td>knife, carrot,</td><td colspan=\"2\">He diced carrots</td><td>He finished chopping the</td></tr><tr><td/><td/><td>cutting board</td><td/><td/><td>carrots in small pieces.</td></tr></table>", |
| "html": null, |
| "num": null, |
| "text": "The man takes out a cutting board. > 1300: He washes a carrot. > 1500: He takes out a knife. > 4000: He slices the carrot." |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td>TEXT</td><td>JACCARD TEXT VECTORS TEXT COMBINED</td><td>0.44 0.42 0.52</td><td>0.14 0.05 0.14</td></tr><tr><td>VIDEO</td><td>VIS. RAW VECTORS VIS. CLASSIFIER VIDEO COMBINED</td><td>0.21 0.21 0.26</td><td>0.23 0.45 0.38</td></tr><tr><td>MIX</td><td>ALL UNSUPERVISED ALL COMBINED</td><td>0.49 0.48</td><td>0.24 0.41</td></tr><tr><td colspan=\"2\">UPPER BOUND</td><td>0.73</td><td>0.73</td></tr></table>", |
| "html": null, |
| "num": null, |
| "text": "MODEL (SAME OBJECT) same action diff. action" |
| } |
| } |
| } |
| } |