| { |
| "paper_id": "L18-1037", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T11:32:26.512224Z" |
| }, |
| "title": "A Recorded Debating Dataset", |
| "authors": [ |
| { |
| "first": "Shachar", |
| "middle": [], |
| "last": "Mirkin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research - Haifa", |
| "location": { |
| "country": "Israel" |
| } |
| }, |
| "email": "shacharm@il.ibm.com" |
| }, |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Jacovi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research - Haifa", |
| "location": { |
| "country": "Israel" |
| } |
| }, |
| "email": "michal.jacovi@il.ibm.com" |
| }, |
| { |
| "first": "Tamar", |
| "middle": [], |
| "last": "Lavee", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research - Haifa", |
| "location": { |
| "country": "Israel" |
| } |
| }, |
| "email": "tamar.lavee1@ibm.com" |
| }, |
| { |
| "first": "Hong-Kwang", |
| "middle": [], |
| "last": "Kuo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Watson - Yorktown Heights", |
| "location": { |
| "region": "New York", |
| "country": "USA" |
| } |
| }, |
| "email": "hkuo@us.ibm.com" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Watson - Yorktown Heights", |
| "location": { |
| "region": "New York", |
| "country": "USA" |
| } |
| }, |
| "email": "sthomas@us.ibm.com" |
| }, |
| { |
| "first": "Leslie", |
| "middle": [], |
| "last": "Sager", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "sagerleslie@gmail.com" |
| }, |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Kotlerman", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research - Haifa", |
| "location": { |
| "country": "Israel" |
| } |
| }, |
| "email": "lili.kotlerman@il.ibm.com" |
| }, |
| { |
| "first": "Elad", |
| "middle": [], |
| "last": "Venezian", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research - Haifa", |
| "location": { |
| "country": "Israel" |
| } |
| }, |
| "email": "eladv@il.ibm.com" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research - Haifa", |
| "location": { |
| "country": "Israel" |
| } |
| }, |
| "email": "noams@il.ibm.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes an English audio and textual dataset of debating speeches, a unique resource for the growing research field of computational argumentation and debating technologies. We detail the process of speech recording by professional debaters, the transcription of the speeches with an Automatic Speech Recognition (ASR) system, their consequent automatic processing to produce a text that is more \"NLP-friendly\", and in parallel-the manual transcription of the speeches in order to produce gold-standard \"reference\" transcripts. We release 60 speeches on various controversial topics, each in five formats corresponding to the different stages in the production of the data. The intention is to allow utilizing this resource for multiple research purposes, be it the addition of in-domain training data for a debate-specific ASR system, or applying argumentation mining on either noisy or clean debate transcripts. We intend to make further releases of this data in the future.", |
| "pdf_parse": { |
| "paper_id": "L18-1037", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes an English audio and textual dataset of debating speeches, a unique resource for the growing research field of computational argumentation and debating technologies. We detail the process of speech recording by professional debaters, the transcription of the speeches with an Automatic Speech Recognition (ASR) system, their consequent automatic processing to produce a text that is more \"NLP-friendly\", and in parallel-the manual transcription of the speeches in order to produce gold-standard \"reference\" transcripts. We release 60 speeches on various controversial topics, each in five formats corresponding to the different stages in the production of the data. The intention is to allow utilizing this resource for multiple research purposes, be it the addition of in-domain training data for a debate-specific ASR system, or applying argumentation mining on either noisy or clean debate transcripts. We intend to make further releases of this data in the future.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Computational argumentation and debating technologies aim to automate the extraction, understanding and generation of argumentative discourse. This field has seen a surge in research in recent years, and involves a variety of tasks, over various domains, including legal, scientific writing and education. Much of the focus is on argumentation mining, the detection of arguments in text and their classification (Palau and Moens, 2009) , but many other tasks are being addressed as well, including argument stance classification (Sobhani et al., 2015; Bar-Haim et al., 2017) , the automatic generation of arguments (Bilu and Slonim, 2016) , identification of persuasive arguments (Wei et al., 2016) , quality assessment (Wachsmuth et al., 2017a) and more. Multiple datasets are available for such research, mostly in English, such as the Internet Argument Corpus (Walker et al., 2012) , that consists of numerous annotated political discussions in internet forums, ArgRewrite (Zhang et al., 2017) , a corpus of argumentative essay revisions, and the datasets released by IBM Research as part of the Debater Project Aharoni et al., 2014) . Lippi and Torroni (2016) list several additional such datasets. Further, Wachsmuth et al. (2017b) have released an argument search engine over multiple debating websites, and Aker and Zhang (2017) have initiated the projection of some datasets to languages other than English, such as Chinese. All of the above are based on written texts, while datasets of spoken debates, outside of the political domain, are scarce. A spoken debate differs from a written essay or discussion not only in structure and content, but also in style as in any other case of spoken vs. written language. Zhang et al. (2016) made available transcripts from the Intelligence Squared 1 debating television show 2 . \nThe transcripts of the show are available on the show's site, and while they are of high quality, they do not match the audio recordings preci-1 http://www.intelligencesquaredus.org 2 http://www.cs.cornell.edu/\u02dccristian/ debates/ sely, requiring substantial additional effort, if one wishes, for example, to use them as ASR training data. With this paper we release a dataset of 60 audio speeches, recorded specifically for debating research purposes. We describe in detail the process of producing these speeches and their automatic and manual transcripts. This is a first batch of a larger set of recordings we intend to produce and release in the future.", |
| "cite_spans": [ |
| { |
| "start": 412, |
| "end": 435, |
| "text": "(Palau and Moens, 2009)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 529, |
| "end": 551, |
| "text": "(Sobhani et al., 2015;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 552, |
| "end": 574, |
| "text": "Bar-Haim et al., 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 615, |
| "end": 638, |
| "text": "(Bilu and Slonim, 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 680, |
| "end": 698, |
| "text": "(Wei et al., 2016)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 720, |
| "end": 745, |
| "text": "(Wachsmuth et al., 2017a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 863, |
| "end": 884, |
| "text": "(Walker et al., 2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 976, |
| "end": 996, |
| "text": "(Zhang et al., 2017)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1115, |
| "end": 1136, |
| "text": "Aharoni et al., 2014)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1139, |
| "end": 1163, |
| "text": "Lippi and Torroni (2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1212, |
| "end": 1236, |
| "text": "Wachsmuth et al. (2017b)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1310, |
| "end": 1335, |
| "text": "and Aker and Zhang (2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1722, |
| "end": 1741, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "We recorded short speeches about debatable topics, with experienced speakers. This section describes the recording process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording the Speeches", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Recruiting and training the speakers Our team of speakers are all litigators or debaters, fluent or native English speakers, experienced in arguing about any given topic. The recruitment and training of the speakers included several steps. First, we interviewed potential speakers to evaluate their ability to argue about a topic when given only a short time to prepare. Then, we provided candidates with an essay to read aloud and record. Candidates were given technical guidelines to ensure high recording quality, including microphone configuration instructions and recording best-practices such as to record in a quiet environment, to use an external microphone and to maintain a fixed distance from the microphone while speaking. After listening to these recordings, we provided the speakers with feedback and repeated the process until the essay recordings were of good quality for the naked ear. Next, we provided each candidate with two motions (e.g. \"we should ban boxing\") and asked them to record a spontaneous speech supporting each motion, after a 10 minute preparation. All recordings -three per candidate (one reading and two spontaneous speeches) -were processed through automatic speech recognition and were sent to manual transcription, as described in the next sections. Comparing the automatic and manual transcripts, we computed the system's Word Error Rate (WER, the sum of substitution, deletion and insertion error rates) for each speech, and accepted candidates for whom the WER was below a pre-defined threshold of 10%. That, to make sure that our ASR system is reasonably successful on their speeches. The recording process All speakers received a list of motions, each with an ID and a short name (to be easily identified by human readers), and background information extracted from Debatabase 3 or Wikipedia. \nThe speakers were directed to spend up to 10 minutes reviewing the motion's topic and preparing their arguments, and then immediately start recording themselves arguing in its favor for 4-8 minutes. The speakers were instructed not to search for further information about the topic beyond the provided description. The idea is to prevent multiple debaters who record a speech about the same topic from reaching the same resources (in particular debating websites), which may reduce the diversity of the ideas presented in the speeches. Example 1 shows a part of background information for the topic \"doping in sports\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recording the Speeches", |
| "sec_num": "2." |
| }, |
| { |
| "text": "At least as far back as Ben Johnson's steroid scandal at the 1988 Olympics, the use of performance-enhancing drugs in sports had entered the public psyche. Johnson's world record sprint, his win, and then, the stripping of his gold medal made news around the world. However, performance-enhancing drugs in sports do not begin with Johnson ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example 1 (Topic background information)", |
| "sec_num": null |
| }, |
| { |
| "text": "Every recorded speech was automatically transcribed by a speaker-independent deep neural network ASR system. The system's acoustic model was trained on over 1000 hours of speech from various broadband speech corpora including broadcast news shows, TED talks 4 and Intelligence Squared debates 5 . We used a 4-gram language model with a vocabulary of 200K words, trained on several billion words that include transcripts of the above speech corpora and various written texts, such as news articles. The ASR system we used is similar to those described in (Soltau et al., 2013; Soltau et al., 2014) . We trained speaker-independent convolutional neural network (CNN) models on 40 dimensional log-mel spectra augmented with delta and double delta features. Each frame of speech is also appended with a context of 5 frames. The first CNN layer of the model has 512 nodes attached with 9 \u00d7 9 filters. Outputs from this layer are then processed by a second feature extraction layer, also with 512 nodes but using a set of 4 \u00d7 3 filters. The outputs from the second CNN layer are finally passed to 5 fully connected layers with 2048 hidden units each, to predict scores for 7K context-dependent states. This speaker-independent ASR system performs on average at 8.4% WER on the speeches we release with this paper.", |
| "cite_spans": [ |
| { |
| "start": 554, |
| "end": 575, |
| "text": "(Soltau et al., 2013;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 576, |
| "end": 596, |
| "text": "Soltau et al., 2014)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Speech Processing", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Once a speech has been automatically transcribed, we obtain a text in the format shown in Example 2. Each token (including sentence boundary and silence markers <s>, <s/>,\u02dcSIL ) is followed by the start and end time of its utterance, in seconds, relative to the beginning of the recording segment. This format is the basis for two versions of the data that we release for each speech: an automatically processed \"clean\" ASR version, and a manually transcribed one. The steps for obtaining the former are described in Section 3.1. The production of manual transcripts is described in Section 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Speech Processing", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Example 2 (Raw ASR output)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Speech Processing", |
| "sec_num": "3." |
| }, |
| { |
| "text": "To obtain a \"clean\" version of the raw ASR output stream, we post-process it, as detailed below. After this processing, the text in Example 2 is converted to the text in Example 3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ASR transcripts", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "\u2022 Removal of timing information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ASR transcripts", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "\u2022 Removal of non-textual tokens: Silence markers, SIL, appear whenever a relatively long pause has been detected in the speech; sentence boundary tags, <s> and </s>, denote predicted beginnings and ends of sentences. These are the result of the fact that the ASR language model was trained not only on spoken language transcripts, but also on written texts that contain punctuation marks. We have experimentally determined that, for our data, these predictions are not reliable enough to be utilized for sentence splitting on their own and used a dedicated method for this purpose, as described below. We also remove tags such as %hes, denoting unspecified speaker's hesitation, as well as other tokens denoting hesitation that were transcribed explicitly, such as ah, um or uh.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ASR transcripts", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "\u2022 Abbreviations reformatting: The ASR-produced underscored abbreviation (initialism) format (i_b_m) is replaced with the standard all-caps one (IBM). \u2022 Automatic punctuation and sentence splitting: The automatically transcribed text contains no punctuation marks. In downstream tasks, such as syntactic parsing, long texts are often difficult to handle, and we consequently split the stream of ASR output into sentences. Unlike typical sentence-splitting methods, whose main goal is to disambiguate between periods that mark end-of-sentence and those denoting abbreviations, here the text contains no periods, hence a different method is required. We employed a bidirectional Long Short-term Memory (LSTM) network (Hochreiter and Schmidhuber, 1997) to predict commas and end-ofsentence periods over the ASR output. This neural network was trained on debate speeches, like the ones we share in this paper, and on TED talks, taken from the English side of the French-English parallel corpus from the IWSLT 2015 machine translation task (Cettolo et al., 2012). 6 \u2022 Capitalization: We apply basic truecasing to the text: capitalizing sentences' first letters and occurrences of \"I\". We have experimented with more sophisticated truecasing tools and abstained from employing them to the released texts due to mixed results.", |
| "cite_spans": [ |
| { |
| "start": 714, |
| "end": 748, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1058, |
| "end": 1059, |
| "text": "6", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ASR transcripts", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "We should allow doping in sports. So by this, we mean steroids, human growth hormone and other similar drugs should be allowed in pro and amateur sports.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example 3 (Clean ASR output)", |
| "sec_num": null |
| }, |
| { |
| "text": "As mentioned, the ASR process produces imperfect texts. In order to obtain a \"reference\" text -a precise transcript of the speech -we employ human transcribers to post-edit the automatic transcript, i.e. correct its mistakes. Transcribers selection and training We invited 15 candidates to train as transcribers, all of which are native or fluent English speakers, experienced in linguistic annotation tasks. As a first test, we asked them to transcribe the same four speeches, after carefully reading the guidelines. We used their outcomes for creating ground-truth transcripts: for each speech, we compared its transcripts pair-wise, listened carefully to points of differences, and created a \"goldtranscript\" that resolved all differences between the individual transcripts. Using these four gold-transcripts, we scored the work of the individual transcribers, and accepted as transcribers nine of the candidates whose transcripts were at least 98% accurate. They were further trained by transcribing ten speeches each, and getting feedback on them upon our review. Once done, we considered them \"experienced transcribers\". Transcription methodology In our experience, starting from initial transcripts produced by ASR can halve the time necessary to produce reference transcripts, while maintaining similar transcript quality. This is particularly true if the ASR is highly accurate since it reduces the number of corrections the human transcriber has to make. One should be aware, however, that this procedure can introduce bias, depending on how conscientious the human transcriber is. An inexperienced or less conscientious transcriber may neglect to correct some ASR mistakes. It is also easier for human transcribers to process shorter segments of speech, especially if they have to listen multiple times to unclear segments. Hence, to speed up the process of human transcription, the audio and transcript are first segmented by cutting them at silences longer than 500ms. \nExcessively long audio segments are then further divided at their longest silences, which must be at least 100ms. Note that the resulting segments do not necessarily correspond to linguistic boundaries or to where punctuation marks should be placed. Instead, in spontaneous speech, a person may pause in the middle of a sentence when faced with an increased cognitive load, e.g. when trying to recall a word. Similar methods of using ASR output as a basis for manual transcription were applied, e.g., by (Park and Zeanah, 2005) and (Matheson, 2007) , for the purpose of transcribing interviews for interview-based research.", |
| "cite_spans": [ |
| { |
| "start": 2486, |
| "end": 2509, |
| "text": "(Park and Zeanah, 2005)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 2514, |
| "end": 2530, |
| "text": "(Matheson, 2007)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Transcription", |
| "sec_num": "4." |
| }, |
| { |
| "text": "The human transcribers used Transcriber 7 , a tool for assisting manual annotation of speech signals through a graphical user interface. The tool synchronizes the text with the audio, and allows the human transcriber to review the text while listening to the audio, and easily pause, fix, annotate, and continue listening from a selected segment. On average, the time needed for manual transcription by experienced transcribers was approximately five times the duration of the audio file. An example of the input to the tool -the output of the above-mentioned segmentation process -is presented in Example 4. The output of the post-edition, which uses the same format, is shown in Example 5. The guidelines used for manual transcription explain how to deal with cases such as speaker hesitation, repetitions and utterance of incomplete words, what punctuation marks to use 8 , how to write abbreviations, numbers, etc. The main principles are that the transcripts should be accurate with respect to the source, capture as much signal as possible, and that they should maintain a uniform format that can be easily parsed in subsequent processing. 9", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Transcription", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Example 4 (Input for manual transcription)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Transcription", |
| "sec_num": "4." |
| }, |
| { |
| "text": "<Sync time=\"18.020\"/> doping is the use of performance enhancing drugs <Sync time=\"21.290\"/> at what i <Sync time=\"22.030\"/> am talking about sports i am of course referring to <Sync time=\"25.015\"/> a competitive sports <Sync time=\"26.630\"/> for example the olympics <Sync time=\"28.320\"/> or other kinds of competitions <Sync time=\"30.040\"/> like a true the fonts <Sync time=\"31.800\"/> and etcetera 7 http://trans.sourceforge.net/en/ presentation.php; We used version 1.5.1 8 The ASR does not produce punctuation marks; it turned out that the transcribers preferred adding them, as it made the text more readable. Punctuation also makes the texts more accessible for analysis and annotation and may be helpful for some automatic processing tasks. 9 The transcription guidelines are shared with the released data.", |
| "cite_spans": [ |
| { |
| "start": 474, |
| "end": 475, |
| "text": "8", |
| "ref_id": null |
| }, |
| { |
| "start": 747, |
| "end": 748, |
| "text": "9", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Transcription", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Example 5 (Output of manual transcription)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Transcription", |
| "sec_num": "4." |
| }, |
| { |
| "text": "<Sync time=\"18.020\"/> doping is the use of performance enhancing drugs . <Sync time=\"21.290\"/> uh when i <Sync time=\"22.030\"/> am talking about sports i am of course referring to <Sync time=\"25.015\"/> uh competitive sports , <Sync time=\"26.630\"/> for example the olympics <Sync time=\"28.320\"/> or other kinds of competitions <Sync time=\"30.040\"/> like uh tour de france <Sync time=\"31.800\"/> uh etcetera ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Transcription", |
| "sec_num": "4." |
| }, |
| { |
| "text": "Some of the annotations in the post-edited transcripts are mostly useful for ASR training, as in the case of word mispronunciation and its correction (e.g. \"lifes/lives\"), while others contain signals that may also be useful for downstream text processing. Our approach in producing the reference transcripts was to remove all non-textual annotations, producing a text-only version of the transcription, that can be used as-is, e.g. for argument extraction. From the Transcriber's output, we first removed all SGML tags and merged the lines into a single stream. We then removed incomplete words and mispronounced words (replacing them with the correct pronunciation); similarly to the raw ASR post-processing, we removed annotations, hesitations, reformatted abbreviations and applied basic truecasing. Then, we detokenized the text, i.e. removed any unnecessary spaces between tokens, for example, before a punctuation mark. Lastly, we applied automatic spell-checking to detect typos and formatting errors, and sent the identified instances of possible typos for review. Example 6 shows the text segment from Example 5 after going through this cleaning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reference Transcripts", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "Doping is the use of performance enhancing drugs. When I am talking about sports I am of course referring to competitive sports, for example the olympics or other kinds of competitions like tour de france etcetera,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example 6 (Clean reference transcript)", |
| "sec_num": null |
| }, |
| { |
| "text": "The dataset we created was generated through the process described in the previous sections. We release all file types, including raw and clean versions, to enable research based on various signals, including audio-based ones, such as prosody or speech rate, and to allow performing different post-processing. Table 2 : List of motion topics in our dataset, and the number of speeches per topic. The right column shows the average WER across speeches of the topic, when using the speaker-independent ASR model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 310, |
| "end": 317, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5." |
| }, |
| { |
| "text": "The dataset we release includes 60 speeches for 16 motions from , recorded by 10 different speakers. 10 Table 2 provides details about the recordings included in the dataset. There is a large variance in WER across different debate recordings, and between different speakers. The WER of any specific debate can vary depending on the degree of mismatch with the ASR acoustic and language models. Examples of mismatch include differences in speaker voice, speaking style and rate, audio capture (microphone type and placement), ambient noise, word choice and phrasing, etc. By reducing mismatch through model adaptation of speakerdependent acoustic models, the WER can be significantly reduced. For instance, with adaptation using about 15 minutes of a speaker's data, WER of a speech from topic 61 was reduced from 12.9% to 8.6%, and of a speech from topic 483, from 12.2% to 9.7%. The dataset is freely available for research at https://www.research.ibm.com/haifa/ dept/vst/mlta_data.shtml.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 111, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5." |
| }, |
| { |
| "text": "We wish to thank the many speakers and transcribers that took part in the effort of creating this dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "6." |
| }, |
| { |
| "text": "http://idebate.org/debatabase 4 https://www.ted.com/ 5 We semi-automatically aligned the transcripts and the audio, to overcome the inconsistency problem mentioned in Section 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This is a simplified version of (Pahuja et al., 2017).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Currently, the list contains only a single female speaker; we are making an effort to recruit more female debaters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Projection of argumentative corpora from source to target languages", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Aker", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 4th Workshop on Argument Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "67--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aker, A. and Zhang, H. (2017). Projection of argumenta- tive corpora from source to target languages. In Procee- dings of the 4th Workshop on Argument Mining, pages 67-72.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Improving claim stance classification with lexical knowledge expansion and context utilization", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bar-Haim", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Edelstein", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Jochim", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 4th Workshop on Argument Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "32--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bar-Haim, R., Edelstein, L., Jochim, C., and Slonim, N. (2017). Improving claim stance classification with lexi- cal knowledge expansion and context utilization. In Pro- ceedings of the 4th Workshop on Argument Mining, pa- ges 32-38.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Claim synthesis via predicate recycling", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Bilu", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bilu, Y. and Slonim, N. (2016). Claim synthesis via predi- cate recycling. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics, page 525.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Long shortterm memory", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Comput", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hochreiter, S. and Schmidhuber, J. (1997). Long short- term memory. Neural Comput., 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Argumentation mining: State of the art and emerging trends", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lippi", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Torroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ACM Trans. Internet Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lippi, M. and Torroni, P. (2016). Argumentation mining: State of the art and emerging trends. ACM Trans. Inter- net Technology, 16:10:1-10:25.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The voice transcription technique: Use of voice recognition software to transcribe digital interview data in qualitative research", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "L" |
| ], |
| "last": "Matheson", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Qualitative Report", |
| "volume": "12", |
| "issue": "4", |
| "pages": "547--560", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matheson, J. L. (2007). The voice transcription technique: Use of voice recognition software to transcribe digital in- terview data in qualitative research. Qualitative Report, 12(4):547-560.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Joint learning of correlated sequence labelling tasks using bidirectional recurrent neural networks", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Pahuja", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Laha", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mirkin", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Raykar", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Kotlerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of INTERSPEECH, Stockhold", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pahuja, V., Laha, A., Mirkin, S., Raykar, V., Kotlerman, L., and Lev, G. (2017). Joint learning of correlated se- quence labelling tasks using bidirectional recurrent neu- ral networks. In Proceedings of INTERSPEECH, Stock- hold, Sweden.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Argumentation mining: the detection, classification and structure of arguments in text", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "M" |
| ], |
| "last": "Palau", |
| "suffix": "" |
| }, |
| { |
| "first": "M.-F", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 12th international conference on artificial intelligence and law", |
| "volume": "", |
| "issue": "", |
| "pages": "98--107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Palau, R. M. and Moens, M.-F. (2009). Argumentation mi- ning: the detection, classification and structure of argu- ments in text. In Proceedings of the 12th international conference on artificial intelligence and law, pages 98- 107. ACM.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "An evaluation of voice recognition software for use in interview-based research: a research note", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "E" |
| ], |
| "last": "Zeanah", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Qualitative Research", |
| "volume": "5", |
| "issue": "2", |
| "pages": "245--251", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Park, J. and Zeanah, A. E. (2005). An evaluation of voice recognition software for use in interview-based research: a research note. Qualitative Research, 5(2):245-251.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Show Me Your Evidence-an Automatic Method for Context Dependent Evidence Detection", |
| "authors": [ |
| { |
| "first": "Ruty", |
| "middle": [], |
| "last": "Rinott", |
| "suffix": "" |
| }, |
| { |
| "first": "Lena", |
| "middle": [], |
| "last": "Dankin", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Alzate", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitesh", |
| "middle": [ |
| "M" |
| ], |
| "last": "Khapra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Aharoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rinott, Ruty and Dankin, Lena and Perez, Carlos Alzate and Khapra, Mitesh M and Aharoni, Ehud and Slonim, Noam. (2015). Show Me Your Evidence-an Automatic Method for Context Dependent Evidence Detection. As- sociation for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "From argumentation mining to stance classification", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Sobhani", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Matwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2nd Workshop on Argumentation Mining, ArgMining@HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "67--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sobhani, P., Inkpen, D., and Matwin, S. (2015). From ar- gumentation mining to stance classification. In Procee- dings of the 2nd Workshop on Argumentation Mining, ArgMining@HLT-NAACL, pages 67-77.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Neural network acoustic models for the DARPA RATS program", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Soltau", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kuo", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Mangu", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Saon", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Beran", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Interspeech. ISCA", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soltau, H., Kuo, H., Mangu, L., Saon, G., and Beran, T. (2013). Neural network acoustic models for the DARPA RATS program. In Proceedings of Interspeech. ISCA.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Joint training of convolutional and non-convolutional neural networks", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Soltau", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Saon", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Sainath", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soltau, H., Saon, G., and Sainath, T. (2014). Joint training of convolutional and non-convolutional neural networks. In Proceedings of ICASSP. IEEE.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Argumentation quality assessment: Theory vs. practice", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Wachsmuth", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Naderi", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Habernal", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Hou", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "250--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wachsmuth, H., Naderi, N., Habernal, I., Hou, Y., Hirst, G., Gurevych, I., and Stein, B. (2017a). Argumentation qua- lity assessment: Theory vs. practice. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), volume 2, pages 250-255.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Building an argument search engine for the web", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Wachsmuth", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Khatib", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Ajjour", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Puschmann", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Qu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Dorsch", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Morari", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Bevendorff", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 4th Workshop on Argument Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "49--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wachsmuth, H., Potthast, M., Al Khatib, K., Ajjour, Y., Puschmann, J., Qu, J., Dorsch, J., Morari, V., Beven- dorff, J., and Stein, B. (2017b). Building an argument search engine for the web. In Proceedings of the 4th Workshop on Argument Mining, pages 49-59.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Is this post persuasive? ranking argumentative comments in the online forum", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "The 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei, Z., Liu, Y., and Li, Y. (2016). Is this post persuasive? ranking argumentative comments in the online forum. In The 54th Annual Meeting of the Association for Compu- tational Linguistics, page 195.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Conversational flow in Oxford-style debates", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ravi", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Danescu-Niculescu-Mizil", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, J., Kumar, R., Ravi, S., and Danescu-Niculescu- Mizil, C. (2016). Conversational flow in Oxford-style debates. In Proceedings of HLT-NAACL.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A corpus of annotated revisions for studying argumentative writing", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Hashemi", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Litman", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1568--1578", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, F., B. Hashemi, H., Hwa, R., and Litman, D. (2017). A corpus of annotated revisions for studying argumentative writing. In Proceedings of the 55th An- nual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), pages 1568-1578, Vancouver, Canada, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A benchmark dataset for automatic detection of claims and evidence in the context of controversial topics", |
| "authors": [ |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Aharoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Anatoly", |
| "middle": [], |
| "last": "Polnarov", |
| "suffix": "" |
| }, |
| { |
| "first": "Tamar", |
| "middle": [], |
| "last": "Lavee", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Hershcovich", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ran", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruty", |
| "middle": [], |
| "last": "Rinott", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Gutfreund", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aharoni, Ehud and Polnarov, Anatoly and Lavee, Tamar and Hershcovich, Daniel and Levy, Ran and Rinott, Ruty and Gutfreund, Dan and Slonim, Noam. (2014). A ben- chmark dataset for automatic detection of claims and evidence in the context of controversial topics. Associ- ation for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "WIT 3 : Web Inventory of Transcribed and Translated Talks. The European Association for Machine Translation (EAMT)", |
| "authors": [ |
| { |
| "first": "Mauro", |
| "middle": [], |
| "last": "Cettolo", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Girardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mauro Cettolo and Christian Girardi and Marcello Fede- rico. (2012). WIT 3 : Web Inventory of Transcribed and Translated Talks. The European Association for Ma- chine Translation (EAMT).", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Show Me Your Evidence-an Automatic Method for Context Dependent Evidence Detection", |
| "authors": [ |
| { |
| "first": "Ruty", |
| "middle": [], |
| "last": "Rinott", |
| "suffix": "" |
| }, |
| { |
| "first": "Lena", |
| "middle": [], |
| "last": "Dankin", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Alzate", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitesh", |
| "middle": [ |
| "M" |
| ], |
| "last": "Khapra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Aharoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Slonim", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rinott, Ruty and Dankin, Lena and Perez, Carlos Alzate and Khapra, Mitesh M and Aharoni, Ehud and Slonim, Noam. (2015). Show Me Your Evidence-an Automatic Method for Context Dependent Evidence Detection. As- sociation for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A Corpus for Research on Deliberation and Debate", |
| "authors": [], |
| "year": 2012, |
| "venue": "European Language Resources Association (ELRA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marilyn Walker and Jean Fox Tree and Pranav Anand and Rob Abbott and Joseph King. (2012). A Corpus for Re- search on Deliberation and Debate. European Language Resources Association (ELRA).", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "text": "summarizes the files that are obtained and released for each debatable topic. Clean automatic transcripts trs Manual transcripts trs.txt Clean manual transcripts (references)", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Extension Description</td></tr><tr><td>wav</td><td>Recorded speeches</td></tr><tr><td>asr</td><td>Raw automatic transcripts</td></tr><tr><td>asr.txt</td><td/></tr></table>" |
| }, |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "text": "Summary of the dataset file types.", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">ID Topic</td><td colspan=\"2\">Speeches WER (%)</td></tr><tr><td>1</td><td>Violent video games</td><td>6</td><td>7.4</td></tr><tr><td colspan=\"2\">21 One-child policy</td><td>5</td><td>8.3</td></tr><tr><td colspan=\"2\">61 Doping in sports</td><td>5</td><td>7.7</td></tr><tr><td colspan=\"2\">101 Affirmative action</td><td>5</td><td>9.6</td></tr><tr><td colspan=\"2\">121 Boxing</td><td>5</td><td>9.6</td></tr><tr><td colspan=\"2\">181 Multiculturalism</td><td>2</td><td>8.5</td></tr><tr><td colspan=\"2\">381 The monarchy</td><td>5</td><td>7.3</td></tr><tr><td colspan=\"2\">482 Cultivation of tobacco</td><td>3</td><td>8.2</td></tr><tr><td colspan=\"2\">483 Freedom of speech</td><td>5</td><td>6.7</td></tr><tr><td colspan=\"2\">602 School vouchers</td><td>5</td><td>7.2</td></tr><tr><td colspan=\"2\">644 Year-round schooling</td><td>1</td><td>8.9</td></tr><tr><td colspan=\"2\">681 Intellectual property</td><td>3</td><td>10.9</td></tr><tr><td colspan=\"2\">701 Endangered species</td><td>2</td><td>6.8</td></tr><tr><td colspan=\"2\">841 Blasphemy</td><td>3</td><td>9.3</td></tr><tr><td colspan=\"2\">881 Holocaust denial</td><td>3</td><td>9.8</td></tr><tr><td colspan=\"2\">945 Infant circumcision</td><td>2</td><td>11.2</td></tr></table>" |
| } |
| } |
| } |
| } |