| { |
| "paper_id": "W08-0117", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:36:54.355143Z" |
| }, |
| "title": "Implicit Proposal Filtering in Multi-Party Consensus-Building Conversations", |
| "authors": [ |
| { |
| "first": "Yasuhiro", |
| "middle": [], |
| "last": "Katagiri", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Future University-Hakodate", |
| "location": {} |
| }, |
| "email": "katagiri@fun.ac.jp" |
| }, |
| { |
| "first": "Yosuke", |
| "middle": [], |
| "last": "Matsusaka", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "yosuke.matsusaka@aist.go.jp" |
| }, |
| { |
| "first": "Yasuharu", |
| "middle": [], |
| "last": "Den", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chiba University", |
| "location": {} |
| }, |
| "email": "den@cogsci.l.chiba-u.ac.jp" |
| }, |
| { |
| "first": "Mika", |
| "middle": [], |
| "last": "Enomoto", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Tokyo University of Technology", |
| "location": {} |
| }, |
| "email": "menomoto@media.teu.ac.jp" |
| }, |
| { |
| "first": "Masato", |
| "middle": [], |
| "last": "Ishizaki", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Tokyo", |
| "location": {} |
| }, |
| "email": "ishizaki@iii.u-tokyo.ac.jp" |
| }, |
| { |
| "first": "Katsuya", |
| "middle": [], |
| "last": "Takanashi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyoto University", |
| "location": {} |
| }, |
| "email": "takanasi@ar.media.kyoto-u.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "An attempt was made to statistically estimate proposals which survived the discussion to be incorporated in the final agreement in an instance of a Japanese design conversation. Low level speech and vision features of hearer behaviors corresponding to aiduti, noddings and gaze were found to be a positive predictor of survival. The result suggests that non-linguistic hearer responses work as implicit proposal filters in consensus building, and could provide promising candidate features for the purpose of recognition and summarization of meeting events.", |
| "pdf_parse": { |
| "paper_id": "W08-0117", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "An attempt was made to statistically estimate proposals which survived the discussion to be incorporated in the final agreement in an instance of a Japanese design conversation. Low level speech and vision features of hearer behaviors corresponding to aiduti, noddings and gaze were found to be a positive predictor of survival. The result suggests that non-linguistic hearer responses work as implicit proposal filters in consensus building, and could provide promising candidate features for the purpose of recognition and summarization of meeting events.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Non-verbal signals, such as gaze, head nods, facial expressions and bodily gestures, play significant roles in the conversation organization functions. Several projects have been collecting multimodal conversation data (Carletta et al., 2006) for multi-party dialogues in order to develop techniques for meeting event recognitions from non-verbal as well as verbal signals. We investigate, in this paper, hearer response functions in multi-party consensusbuilding conversations. We focus particularly on the evaluative aspect of verbal and non-verbal hearer responses. During the course of a consensus-building discussion meeting, a series of proposals are put on the table, examined, evaluated and accepted or rejected. The examinations of proposals can take the form of explicit verbal exchanges, but they can also be implicit through accumulations of hearer responses. Hearers would express, mostly unconsciously for non-verbal signals, their interest and positive appraisals toward a proposal when it is introduced and is being discussed, and that these hearer responses would collectively contribute to the determination of final consensus making. The question we address is whether and in what degree it is possible and effective to filter proposals and estimate agreement by using verbal and non-verbal hearer responses in consensus-building discussion meetings.", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 242, |
| "text": "(Carletta et al., 2006)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 Multi-Party Design Conversation Data", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We chose multi-party design conversations for the domain of our investigation. Different from a fixed problem solving task with a 'correct' solution, participants are given partially specified design goals and engage in a discussion to come up with an agreement on the final design plan. The condition of our data collection was as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data collection", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Number of participants: six for each session Arrangement: face-to-face conversation Task: Proposal for a new mobile phone business Role: No pre-determined role was imposed A compact meeting archiver equipment, AIST-MARC (Asano and Ogata, 2006) , which can capture panoramic video and speaker-separated speech streams, was used to record conversations (Fig. 1) . The data we examined consist of one 30 minutes conversation conducted by 5 males and 1 female. Even though we did not assign any roles, a chairperson and a clerk were spontaneously elected by the participants at the beginning of the session. In order to provide a clause level segmentation of a multi-channel speech stream, we extended the notion of 'clause units (CUs)', originally developed for analyzing spoken monologues in the Corpus of Spontaneous Japanese (Takanashi et al., 2003) , to include reactive tokens (Clancy et al., 1996) and other responses in spoken conversations. Two of the authors who worked on the Corpus of Spontaneous Japanese independently worked on the data and resolved the differences, which created 1403 CUs consisting of 469 complete utterances, 857 reactive tokens, and 77 incomplete or fragmental utterances.", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 243, |
| "text": "(Asano and Ogata, 2006)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 825, |
| "end": 849, |
| "text": "(Takanashi et al., 2003)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 879, |
| "end": 900, |
| "text": "(Clancy et al., 1996)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 351, |
| "end": 359, |
| "text": "(Fig. 1)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data collection", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We developed a simple classification scheme of discourse segments for multi-party consensus building conversations based on the idea of 'interaction process analysis' (Bales, 1950) .", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 180, |
| "text": "(Bales, 1950)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposal units", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Proposal: Presentation of new ideas and their evaluation. Substructures are often realized through elaboration and clarification. Summary: Sum up multiple proposals possibly with their assessment Orientation: Lay out a topic to be discussed and signal a transition of conversation phases, initiated mostly by the facilitator of the discussion Miscellaneous: Other categories including opening and closing segments", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposal units", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "The connectivity between clause units, the content of the discussion, interactional roles, relationship with adjacent segments and discourse markers were considered in the identification of proposal units. Two of the authors, one worked on the Corpus of Spontaneous Japanese and the other worked for the project of standardization of discourse tagging, independently worked on the data and resolved the differences, which resulted in 19 proposals, 8 summaries, 19 orientations and 2 miscellaneouses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposal units", |
| "sec_num": "2.2.2" |
| }, |
| { |
| "text": "Core clause units (CUs) were selected, out of all the clause units, based on whether the CUs have substantial content as a proposal. A CU was judged as a core CU, when the annotator would find it appropriate to express, upon hearing the CU, either an approval or a disapproval to its content if she were in the position of a participant of the conversation. Three of the authors worked on the text data excluding the reactive tokens, and the final selection was settled by majority decision. 35 core CUs were selected from 235 CUs in the total of 19 proposal PUs. Cohen's kappa agreement rate was 0.894. Survived proposal units (PUs) were similarly selected, out of all the proposal units, based on whether the PUs were incorporated in the final agreement among all the participants. 9 survived PUs were selected from 19 proposal PUs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Core clause units and survived proposal units", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "For each clause unit (CU), verbal and non-verbal features concerning hearer's behavior were extracted from the audio and the video data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Extraction of Hearer's Behavior", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We focused on nodding and gaze, which were approximated by vertical and horizontal head movements of participants.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Verbal Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "An image processing algorithm ( Figure 2 ) was applied to estimate head directions and motions (Matsusaka, 2005) . Figure 3 shows a sample scene and the results of applying head direction estimation algorithm. For each CU, the vertical and horizontal components of head movements of 5 hearers were calculated for two regions, the region inside the CU and the 1-sec region immediately after the CU. For each of the two regions, the mean and the peak values and the relative location, in the region, of the peak were computed. These 12 non-verbal features were used for the statistical modeling.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 112, |
| "text": "(Matsusaka, 2005)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 40, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 115, |
| "end": 123, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Non-Verbal Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Verbal features were extracted from the audio data. For each CU, power values of 5 hearers were extracted for two regions, 'within' and 'after' CU, and for each of the two regions, the mean and the peak values and the relative location, in the region, of the peak were computed. In addition to these verbal features, we also used aiduti features of reactive tokens (RTs). The percentage of the total duration of RTs, the total number of RTs, and the number of participants who produced an RT were computed in 'within' and 'after' regions for each of the CUs. A total of 12 CU verbal features were used for the statistical modeling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Verbal Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Statistical modeling was employed to see if it is possible to identify the proposal units (PUs) that survived in the participants' final consensus. To this end, we, first, find the dominant clause unit (CU) in each PU, and, then, based on the verbal and nonverbal features of these CUs, we classify PUs into 'survived' and 'non-survived.' ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview of the Algorithm", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "A logistic regression model was used to model the coreness of CUs. A total of 24 verbal and non-verbal features were used as explanatory variables. Since the number of non-core CUs was much larger than that of core CUs, down-sampling of negative instances was performed. To obtain a reliable estimation, a sort of Monte Carlo simulation was adopted. A model selection by using AIC was applied for the 35 core CUs and another 35 non-core CUs that were re-sampled from among the set of 434 complete and non-core CUs. This process was repeated 100 times, and the features frequently selected in this simulation were used to construct the optimal model. Table 1 shows the estimated coefficient for the optimal model, and Table 2 shows the accuracy based on a leave-1-out cross validation. The dominant CU in each PU was identified as the CU ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 650, |
| "end": 657, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 717, |
| "end": 724, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding Dominant CUs", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "0 0 0 0 1 0 0 0 0 0 (0) (0) (1) (0) (0)(0)(0) (0) (0) (0) (0) (0) (1) (0) (0) (0) (0) (0)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding Dominant CUs", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Figure 4: The predicted coreness of CUs. Dominant CUs were defined to be CUs with the highest coreness in each of the PUs. Black and white dots are CUs labeled as core and non-core. 3.96 within/speech power/mean \u221227.76 after/speech power/peak val.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finding Dominant CUs", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "1.49 with the highest predicted value in that PU. Figure 4 shows the predicted values for coreness.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 50, |
| "end": 58, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding Dominant CUs", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The verbal and non-verbal features of the dominant CUs of each of the PUs were used for the modeling of the survived-PU prediction. Discriminant analysis was utilized and a model selection was applied for the 47 PUs. Table 3 shows the estimated coefficient for the optimal model, and Table 4 shows the accuracy based on a leave-1-out cross validation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 217, |
| "end": 224, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 284, |
| "end": 291, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finding Survived PUs", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The results of our estimation experiments indicate that the final agreement outcome of the discussion can be approximately estimated at the proposal level. Though it may not be easy to identify actual utterances contributing to the agreement (core-CUs), the dominant CUs in PUs were found to be effective in the identification of survived-PUs. The prediction accuracy of survived-PUs was about 89%, with the chance level of 69%, whereas that of core-CUs was about 92%, with the chance level of 86%. In terms of hearer response features, intensity of verbal responses (within/speech power/mean, after/speech power/mean), and immediate nodding responses (after/vertical motion/peak loc.) were the most common contributing features in core-CU estimation. In contrast, occurrence of a strong aiduti immediately after, rather than within, the core-CU (after/speech power/peak val.), and a strong nodding within the core-CU (within/vertical motion/peak val.) appear to be signaling support from hearers to the proposal. It should be noted that identification of target hearer behaviors must be validated against manual annotations before these generalizations are established. Nevertheless, the results are mostly coherent with our intuitions on the workings of hearer responses in conversations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have shown that approximate identification of the proposal units incorporated into the final agreement can be obtained through the use of statistical pattern recognition techniques on low level speech and vision features of hearer behaviors. The result provides a support for the idea that hearer responses convey information on hearers' affective and evaluative attitudes toward conversation topics, which effectively functions as implicit filters for the proposals in the consensus building process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The work reported in this paper was supported by Japan Society for the Promotion of Science Grants-in-aid for Scientific Research (B) 18300052.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Detection and separation of speech events in meeting recordings", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Asano", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ogata", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "2586--2589", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Asano and J. Ogata. 2006. Detection and separation of speech events in meeting recordings. In Proc. Inter- speech, pages 2586-2589.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A set of categories for the analysis of small group interaction", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "F" |
| ], |
| "last": "Bales", |
| "suffix": "" |
| } |
| ], |
| "year": 1950, |
| "venue": "", |
| "volume": "15", |
| "issue": "", |
| "pages": "257--263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. F. Bales. 1950. A set of categories for the analysis of small group interaction. American Sociological Re- view, 15:257-263.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The AMI meeting corpus: A pre-announcement", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Carletta", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ashby", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bourban", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Flynn", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Guillemot", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hain", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kadlec", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Karaiskos", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Kraaij", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kronenthal", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Lathoud", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lincoln", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lisowska", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Mccowan", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Reidsma", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Wellner", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Machine Learning for Multimodal Interaction", |
| "volume": "", |
| "issue": "", |
| "pages": "28--39", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Carletta, S. Ashby, S. Bourban, M. Flynn, M. Guille- mot, T. Hain, J. Kadlec, V. Karaiskos, W. Kraaij, M. Kronenthal, G. Lathoud, M. Lincoln, A. Lisowska, I. McCowan, W. Post, D. Reidsma, and P. Wellner. 2006. The AMI meeting corpus: A pre-announcement. In Machine Learning for Multimodal Interaction, pages 28-39.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The conversational use of reactive tokens in English, Japanese and Mandarin", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "M" |
| ], |
| "last": "Clancy", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "A" |
| ], |
| "last": "Thompson", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Journal of Pragmatics", |
| "volume": "26", |
| "issue": "", |
| "pages": "355--387", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. M. Clancy, S. A. Thompson, R. Suzuki, and H. Tao. 1996. The conversational use of reactive tokens in En- glish, Japanese and Mandarin. Journal of Pragmatics, 26:355-387.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Recognition of 3 party conversation using prosody and gaze", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Matsusaka", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. Interspeech", |
| "volume": "", |
| "issue": "", |
| "pages": "1205--1208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Y. Matsusaka. 2005. Recognition of 3 party conversation using prosody and gaze. In Proc. Interspeech, pages 1205-1208.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Identification of 'sentence' in spontaneous Japanese: detection and modification of clause boundaries", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Takanashi", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Maruyama", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Uchimoto", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Isahara", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. ISCA & IEEE Workshop on Spontaneous Speech Processing and Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "183--186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Takanashi, T. Maruyama, K. Uchimoto, and H. Isahara. 2003. Identification of 'sentence' in. spontaneous Japanese: detection and modification of clause boundaries. In Proc. ISCA & IEEE Workshop on Spontaneous Speech Processing and Recognition, pages 183-186.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Figure 1: AIST-MARC and a recording scene" |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Image processing algorithm" |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Sample scene with image processing results. The circles represent detected face areas, and the lines in the circles represent head directions." |
| }, |
| "TABREF0": { |
| "html": null, |
| "content": "<table><tr><td>Estimate</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "text": "The optimal model for finding core-CUs" |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">: Confusion matrix of core-CU prediction experi-</td></tr><tr><td colspan=\"2\">ment (precision = 0.50, recall = 0.086)</td><td/></tr><tr><td/><td>Predicted</td><td/></tr><tr><td colspan=\"3\">Observed Non-core Core</td></tr><tr><td>Non-core</td><td>431</td><td>3</td></tr><tr><td>Core</td><td>32</td><td>3</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "text": "" |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "text": "The optimal model for finding survived-PUs Estimate within/vertical motion/peak val." |
| }, |
| "TABREF3": { |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">: Result of the survived-PU prediction (precision</td></tr><tr><td>= 0.83, recall = 0.44)</td><td/><td/></tr><tr><td/><td>Predicted</td><td/></tr><tr><td>Observed</td><td colspan=\"2\">Non-survived Survived</td></tr><tr><td>Non-survived</td><td>37</td><td>1</td></tr><tr><td>Survived</td><td>4</td><td>5</td></tr></table>", |
| "type_str": "table", |
| "num": null, |
| "text": "" |
| } |
| } |
| } |
| } |