| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:22:14.173858Z" |
| }, |
| "title": "Towards Large-Scale Data Mining for Data-Driven Analysis of Sign Languages", |
| "authors": [ |
| { |
| "first": "Boris", |
| "middle": [], |
| "last": "Mocialov", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heriot-Watt University", |
| "location": { |
| "settlement": "Edinburgh", |
| "country": "UK" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Turner", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heriot-Watt University", |
| "location": { |
| "settlement": "Edinburgh", |
| "country": "UK" |
| } |
| }, |
| "email": "g.h.turner@hw.ac.uk" |
| }, |
| { |
| "first": "Helen", |
| "middle": [], |
| "last": "Hastie", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Heriot-Watt University", |
| "location": { |
| "settlement": "Edinburgh", |
| "country": "UK" |
| } |
| }, |
| "email": "h.hastie@hw.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Access to sign language data is far from adequate. We show that it is possible to collect the data from social networking services such as TikTok, Instagram, and YouTube by applying data filtering to enforce quality standards and by discovering patterns in the filtered data, making it easier to analyse and model. Using our data collection pipeline, we collect and examine the interpretation of songs in both the American Sign Language (ASL) and the Brazilian Sign Language (Libras). We explore their differences and similarities by looking at the co-dependence of the orientation and location phonological parameters.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Access to sign language data is far from adequate. We show that it is possible to collect the data from social networking services such as TikTok, Instagram, and YouTube by applying data filtering to enforce quality standards and by discovering patterns in the filtered data, making it easier to analyse and model. Using our data collection pipeline, we collect and examine the interpretation of songs in both the American Sign Language (ASL) and the Brazilian Sign Language (Libras). We explore their differences and similarities by looking at the co-dependence of the orientation and location phonological parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The data-driven field of automated sign language understanding is dependent on large amounts of high-quality data, independent of the application or the motivation of the research. Unfortunately, such data have typically had restricted access, either due to the projects finishing or limiting license terms. Online services, on the other hand, offer large amounts of data that have more relaxed terms and conditions and are available for as long as the service providers exist. The field of written and spoken languages has recently greatly benefited from the use of such data for natural language understanding projects trained on, for example, Reddit, Twitter, Amazon reviews. However, there is an uneven distribution of users of spoken languages versus sign language users around the world. Therefore, the amount of sign language data on the internet is naturally lower than that of spoken languages. Moreover, sign languages do not have a common writing system as opposed to spoken languages, which makes it very difficult to annotate. Furthermore, the data from research projects usually have application-dependent annotation using either words in written languages, phonological parameters, or sign pictures (Konrad, 2015) . Despite the fact that there is no common writing system for sign languages, HamNoSys defines one notation system that is often used by researchers. This system distinguishes phonological parameters (e.g. location, orientation, movement, handshape, non-manual gestures) present during signing (Hanke, 2004) . First, we show that it is possible to utilise social networking platforms to support research in data-driven automated sign language understanding. Second, we take a look at two sign languages that have relatively little historical relationship. 
One sign language being ASL and the second being Libras and investigate the signing behaviour of the spoken song interpreters, while looking at three English songs: 'Love Yourself' by Justin Bieber, 'Halo' by Beyonc\u00e9, and 'Love On The Brain' by Rihanna. We investigate and compare two phonological parameters: hand location relative to the signers' body and extended finger orientation. This work will quantify frequently occurring hand positioning during the signing and compare the prevailing hand positions and orientations between the two sign languages, aiming to show that sign languages evolve differently. The reason why we investigate interpreted songs is because we want to compare sign languages by looking at continuous signing in different sign languages that sign the same information. Findings in this paper could also assist researchers who work on developing models for sign language understanding by reducing the search space of the models during the optimisation by ignoring combinations that are relatively infrequent during continuous signing.", |
| "cite_spans": [ |
| { |
| "start": 1214, |
| "end": 1228, |
| "text": "(Konrad, 2015)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1523, |
| "end": 1536, |
| "text": "(Hanke, 2004)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "With the rise of accurate pose prediction and hand estimation libraries such as the OpenPose (Cao et al., 2018; Cao et al., 2017; Wei et al., 2016) , researchers in the field of automated sign language understanding are now able to focus on high level abstract research ideas. Contemporary research looks at translating written languages to sign languages and vice versa, thus resembling research done in the field of the machine translation for spoken languages (Camg\u00f6z et al., 2018; Stoll et al., 2019; Yuan et al., 2019) . The common linguistically inspired approach is for the raw visual modality of the sign languages to be broken down into the sub-lexical phonological parameters (e.g. location, orientation, movement, handshape, and non-manual gestures). Multiple previous works have modelled individual phonological parameters. Cooper and Bowden (2007) modelled hand location, movement, and relative hand position and called them the sub-sign units. Cooper et al. (2012) relied on handshape, location, movement, and relative hand position in their work on recognition of the individual signs in The British Sign Language. Buehler et al. (2009) examined movement, handshape, and orientation while matching the combination of these parameters to find similar signs. Also Buehler et al. (2010) used location and handshape in the multiple instance learning problem. Koller et al. 2016focused on modelling sixty handshapes. Their model is a chain of convolutional neural networks (VGG) pre-trained on the ImageNet data (Simonyan and Zisserman, 2015) . In our work, we generate linguistic annotations in the form of hand location relative to the signers' body and extended finger orientation for the continuous interpretations of the three English songs (mentioned above).", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 111, |
| "text": "(Cao et al., 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 112, |
| "end": 129, |
| "text": "Cao et al., 2017;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 130, |
| "end": 147, |
| "text": "Wei et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 463, |
| "end": 484, |
| "text": "(Camg\u00f6z et al., 2018;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 485, |
| "end": 504, |
| "text": "Stoll et al., 2019;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 505, |
| "end": 523, |
| "text": "Yuan et al., 2019)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 836, |
| "end": 860, |
| "text": "Cooper and Bowden (2007)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 958, |
| "end": 978, |
| "text": "Cooper et al. (2012)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1130, |
| "end": 1151, |
| "text": "Buehler et al. (2009)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1277, |
| "end": 1298, |
| "text": "Buehler et al. (2010)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1522, |
| "end": 1552, |
| "text": "(Simonyan and Zisserman, 2015)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation for Automated Processing of Sign Languages", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The lack of the text annotation that could provide context remains an issue for video data. Joze and Koller (2018) noticed that many signing videos have captions, which could be an additional source of annotation, as more and more content is being generated online, including that for the deaf community. In this work, we focus only on the linguistic annotations without inferring the context.", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 114, |
| "text": "Joze and Koller (2018)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Motivation for Automated Processing of Sign Languages", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Context-Specific (Ko et al., 2019) Isolated Signs (Zhou et al., 2009) Individual Phonological Parameters (Cooper et al., 2012) Translation Sign-Text (Camg\u00f6z et al., 2018) Text-Sign (Stoll et al., 2019) Learning Zero-Shot (Bilge et al., 2019) Clustering (Nandy et al., 2010) Augmentation (Mocialov et al., 2017) Linguistic Studies Phonological Parameter Co-Dependence (\u00d6stling et al., 2018)", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 34, |
| "text": "(Ko et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 50, |
| "end": 69, |
| "text": "(Zhou et al., 2009)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 105, |
| "end": 126, |
| "text": "(Cooper et al., 2012)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 149, |
| "end": 170, |
| "text": "(Camg\u00f6z et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 181, |
| "end": 201, |
| "text": "(Stoll et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 221, |
| "end": 241, |
| "text": "(Bilge et al., 2019)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 253, |
| "end": 273, |
| "text": "(Nandy et al., 2010)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 287, |
| "end": 310, |
| "text": "(Mocialov et al., 2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recognition", |
| "sec_num": null |
| }, |
| { |
| "text": "Teaching (Stefanov and Beskow, 2017) Edutainment (Zafrulla et al., 2011) Sign Spotting Queries (Belissen, 2018) We group surveyed papers by their motivation, omitting works that use data other than a single RGB camera and papers that focus on pose estimation, tracking, or finger spelling, as these do not directly align with research in sign language understanding. Table 1 categorises the research directions in the field of automated sign language understanding. It can be seen that there are projects that focus on more abstract concepts than learning the isolated signs, such as automated data-driven sign language translation. Apart from the recognition of the signs as an attempt to bridge the gap between the hearing and the deaf communities, assistive tools for digital sign language content annotation are gaining interest. For example, Takayama and Takahashi (2018) automatically annotate datasets and Belissen (2018) query databases with videos of signs, which could be beneficial for accelerating research in linguistic aspects of sign language.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 36, |
| "text": "(Stefanov and Beskow, 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 49, |
| "end": 72, |
| "text": "(Zafrulla et al., 2011)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 95, |
| "end": 111, |
| "text": "(Belissen, 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 847, |
| "end": 876, |
| "text": "Takayama and Takahashi (2018)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 367, |
| "end": 374, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Education", |
| "sec_num": null |
| }, |
| { |
| "text": "Unknown context Some standard Possible description", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Filtering", |
| "sec_num": null |
| }, |
| { |
| "text": "Inferred context Some standard Matching description", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discovering Patterns", |
| "sec_num": null |
| }, |
| { |
| "text": "Online Data Data Analysis / Modelling Figure 1 : Data collection pipeline for collecting the signing data from the social networking services for the purpose of data-driven automated sign language understanding Data from online social-media resources tends to be very unpredictable. Therefore, the collection of data has to pass through a number of stages as we suggest in Figure 1 . The first stage of the data collection pipeline is data filtering, where we turn the online data that has no standard into data that has some pre-defined standard. The second stage looks for the patterns in the filtered data either with the help of metadata or the automatic visual analysis of the collected filtered data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 38, |
| "end": 46, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 373, |
| "end": 381, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discovering Patterns", |
| "sec_num": null |
| }, |
| { |
| "text": "In this research, we focus on interpreted songs. As deafspecific music performers are rare, sign language users resort to 'listening' to interpretations of the songs found in the spoken or written languages that are being interpreted by those who can both hear and sign. This is evidenced by the relatively large amount of content found in online resources such as TikTok, YouTube, or Instagram. Such content makes the interpretation of the spoken songs possible for the deaf community, encouraging visualisation of music (Desblache, 2019) . We consider interpreted songs as our data format because it is possible to find the same songs interpreted in different sign languages, which makes the comparison of the sign languages more precise. We collect one video for every interpreted song for each sign language. Therefore, we have collected a small dataset of continuous signing videos from YouTube from six different signers, interpreting three songs in two sign languages for this proof of concept study.", |
| "cite_spans": [ |
| { |
| "start": 522, |
| "end": 539, |
| "text": "(Desblache, 2019)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Online Data", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "We use the OpenPose library for data filtering. The library detects 2D or 3D anatomical key-points, associated with the human body, hands, face, and feet in a single image. The library provides 21 (x,y) key-points for every part of the hand, 25 key-points for the whole body skeleton, and 70 key-points for the face. The OpenPose library helps us apply simple filters to the raw data, discarding all the content that has more than one signer at the same time or any heavy obstructions or occlusions. We also discard the content that has too few key-points visible, as we think it is essential to see the upper body and the hands to make sense of the signing. By performing such filtering, we enforce a quality standard upon the collected online data. However, the context and the signer profile remain unknown. Other filters could include normalisation, transformation, and rotation of key-points to make the signer appear the same size across videos and to make signers face the camera for more accurate modelling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Filtering", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "We perform visual analysis by extracting the location and orientation sub-lexical phonological parameters from the filtered data looking at the frequency of occurrences of specific location/orientation combinations in the collected filtered data. The following sections will show how we infer sublexical components by making use of identified key-points and geometry. Likewise, metadata can assist in discovering patterns. This metadata can comprise of hashtags, textual description, or the embedded captions on the videos.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discovering Patterns", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "A total of eight orientations have been used for the extended finger orientation as defined in the HamNoSys notation with each orientation having 45 \u2022 movement (north, north-east, east, south-east, south, south-west, west, and north-west). HamNoSys does define more orientations (e.g. towards or away from the body), however having 2D data makes it difficult to estimate additional orientations. The angle is calculated using the inverse trigonometric function between the radius and middle finger coordinates as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended Finger Orientation", |
| "sec_num": "3.3.1." |
| }, |
| { |
| "text": "\u2212\u03c0/2 < arctan (q y \u2212 p y , q x \u2212 p x ) < \u03c0/2,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended Finger Orientation", |
| "sec_num": "3.3.1." |
| }, |
| { |
| "text": "where q and p are the (x, y) coordinates of radius and middle finger metacarpal bones with every orientation having \u03c0/4 freedom", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended Finger Orientation", |
| "sec_num": "3.3.1." |
| }, |
| { |
| "text": "A total of six locations around the body have been used to determine hand position (ears, eyes, nose, neck, shoulder, and abdomen), as opposed to the forty six defined by the HamNoSys notation system. Six were chosen to simplify the detection while complying with the OpenPose library standards. Hand centroids are calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "centroid right = ( N i=1 x i right /N, N i=1 y i right /N ) centroid lef t = ( N i=1 x i lef t /N, N i=1 y i lef t /N )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "where N is the number of points provided by the OpenPose library for each hand. In order to assign the relative hand location, a threshold has to be assigned as to how far the centroid of a hand can be from a specific body location so as to still be relatively close to that body part. All the distances are measured in pixels and the threshold is set to be 10% of the diagonal of the image frame, which is approximately 100 pixels. If the distance of a centroid away from all the body parts exceeds the threshold, the hand is considered to be in the 'neutral signing space'. The distance matrix D for every hand is calculated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "M r . . . N r = |q m...n \u2212 centroid right | M l . . . N l = |q m...n \u2212 centroid lef t | D = \uf8eb \uf8ec \uf8ed M r M l . . . . . . N r N l \uf8f6 \uf8f7 \uf8f8", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "Where q m...n are the (x, y) position of the body parts, defined by the OpenPose library (e.g. nose, neck, shoulder, elbow, etc.) and the M r . . . N r and M l . . . N l are the Euclidean distances between the body parts and right and left hand centroids. In order to find the body part B right or B lef t , which has the smallest distance to the centroid of the right or left hand, we use", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "B right = argmax D i,1 B lef t = argmax D i,2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "The distances are then compared to a threshold to determine if a hand is near a particular body part or is in the 'neutral signing space' anywhere around the body.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hand Location Relative to the Body", |
| "sec_num": "3.3.2." |
| }, |
| { |
| "text": "Once the data has been filtered and the patterns have been discovered, we acquired information on 43016 hand locations and the same number for the hand orientations for ASL and 38258 for both hand location and orientation for Libras for the interpreted three songs. We are interested in the analysis of the co-dependence of phonological parameters for each hand and comparing the significant co-dependences across the two sign languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Analysis / Modelling", |
| "sec_num": "3.4." |
| }, |
| { |
| "text": "Here, we refer to location as T AB and to orientation as ORI for the shorthand notation. First, a global C ORI N ,T AB M contingency table is generated and counts the occurences for both location/orientation variables for every category that occurs in the collected data (e.g. North, North-East, etc. for orientation and Shoulder, Neck, etc. for location). Second, a series of local contingency tables C 2\u00d72 are constructed from the global C ORI N ,T AB M contingency table for every category of every variable as a post-hoc step. Finally, Bonferroni-adjusted p-value was used (Bland and Altman, 1995) to check if the presence of a particular location/orientation combination in the data set is significant, compared to other location/orientation combinations, by performing a Chi-square test of independence of variables for all the C 2\u00d72 contingency tables. Table 2 : Word cloud generated from the lyrics for the three English language songs Table 2 shows the word cloud for the three songs ('Love Yourself' by Justin Bieber, 'Halo' by Beyonc\u00e9, and 'Love On The Brain' by Rihanna) generated from the lyrics obtained online. The purpose of the word cloud is to give insight into which words are frequent in the lyrics. As it can be seen, the lyrics for all the songs often mention love and romantic feelings. Table 3 shows screenshots of the collected online data for three English songs performed by three different artists. The songs are interpreted in two sign languages by different signers. From the screenshots, it can be seen that the proximity of the signer to the camera varies. Some videos are edited by applying black and white or vintage camera filters. As a general rule, there is no camera movement, but the signers usually dance slightly to the songs. Table 4 : Screenshots of the filtered online data after applying the OpenPose library. 
Visible skeletons on the screenshot means that the library was able to detect a human in the video and is tracking the pose, hand, and face key-points Table 4 shows screenshots of the filtered data after the Open-Pose library has been applied to the data. The library was able to detect a human in the video and is tracking the pose, hand, and face key-points. Since we are not interested in keeping the integrity of the sequences of the frames, we simply discard the frames where key-points were not detected by the library. Table 5 shows the relative frequencies of the location/orientation combinations for each video and each sign language. We can observe that the Libras, on one hand, has less abdomen activity than the ASL (indicated in light blue) while, on the other hand, Libras has more neck and ears activity than the ASL (dark blue and green respectively). Both sign languages have more pointing up direction of the hands as opposed to other possible directions (wider NE/N/NW columns). Figure 2 : Significant location and orientation phonological parameter co-dependences in a) ASL and b) Libras with Bonferroni-adjusted Chi-squared p-value < 0.001 for both sign languages", |
| "cite_spans": [ |
| { |
| "start": 577, |
| "end": 601, |
| "text": "(Bland and Altman, 1995)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 860, |
| "end": 867, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 944, |
| "end": 951, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1310, |
| "end": 1317, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1768, |
| "end": 1775, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 2006, |
| "end": 2013, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 2381, |
| "end": 2388, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 2854, |
| "end": 2862, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Phonological Parameter Co-Dependence", |
| "sec_num": "3.4.1." |
| }, |
| { |
| "text": "C ORI N ,T AB M = \uf8eb \uf8ec \uf8ec \uf8ed c ORI 1 ,T AB 1 . . . c ORI 1 ,T AB M . . . . . . . . . c ORI N ,T AB 1 c ORI N ,T AB M \uf8f6 \uf8f7 \uf8f7 \uf8f8 S ORI N ,T AB M = C ORI N ,T AB M \u2229 (C ORI i ,T AB j \u222a ORIi \u222a T ABj ) C2\u00d72 = C ORI i ,T AB j ORIi T ABj S ORI N ,T AB M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Phonological Parameter Co-Dependence", |
| "sec_num": "3.4.1." |
| }, |
| { |
| "text": "Having visually analysed the frequencies of the location/orientation combinations, we are interested in finding the significant combinations for each hand that prevails in the collected data and compare the two sign languages based on this analysis. Figure 2 shows significant location/orientation codependences for each sign language after the Bonferroniadjusted Chi-squared p-value analysis. We can see that both hands tend to point up at the upper side of the body, which is similar for the both sign languages. Libras, however, has more activity with both hands at the upper part of the body than the ASL. As a matter of fact, Libras has more activity with both hands around all the parts of the body. In ASL, on the other hand, the left hand is less mobile than the right hand. This could be explained by the fact that the signers in Libras were left-handed, but we do not have this information available to verify this speculation. Some significant co-dependences are unusual, for example, pointing down at the upper body level, which may feel unnatural and slightly contradicts the past findings by Cooper et al. (2012) stating that a subset of the 'comfortable' hand configurations are assumed more often during the signing, independent of the sign language. This can also be explained by the fact that the signers in the video are slightly dancing to the music, which may affect the signing orientation. It is worth mentioning that the co-dependence analysis results of the two languages may change with the data. For example, if songs with a different sentiment were taken for the analysis. More data is needed to experiment this further.", |
| "cite_spans": [ |
| { |
| "start": 1106, |
| "end": 1126, |
| "text": "Cooper et al. (2012)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 250, |
| "end": 258, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Analysis / Modelling", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "In this work, we have showed the preliminary results of mining sign language data acquired from the internet for au-tomated data-driven sign language processing. We have created a pipeline that downloads the videos of the interpreted songs from the internet, applies filtering of the data and then finds patterns in the data based on the HamNoSys notation that is often used for the annotation of the sign languages. This method could also be used for querying videos in large datasets. Finally, we compare two historically different sign languages (ASL and Libras) by their location/orientation co-dependencies present in the collected data and show that, despite there being little historical background of the two languages interacting, they still share similar signing patterns with small variations in the flexibility of the hands, which can be explained by the fact that people converge to the usage of the 'comfortable' hand configurations. Future work will compare even more historically unrelated sign languages and look at the interpretations of a greater number of songs, in order to have a more accurate comparison of the signing patterns across the sign languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5." |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Sign language video analysis for automatic recognition and detection", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Belissen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 20th International ACM SIGACCESS Conference on Computers and Accessibility", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Belissen, V. (2018). Sign language video analysis for au- tomatic recognition and detection. In Proceedings of the 20th International ACM SIGACCESS Conference on Com- puters and Accessibility.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Zero-shot sign language recognition: Can textual data uncover sign languages?", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [ |
| "C" |
| ], |
| "last": "Bilge", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Ikizler-Cinbis", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "G" |
| ], |
| "last": "Cinbis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the British Machine Vision Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bilge, Y. C., Ikizler-Cinbis, N., and Cinbis, R. G. (2019). Zero-shot sign language recognition: Can textual data uncover sign languages? In Proceedings of the British Machine Vision Conference.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Multiple significance tests: the bonferroni method", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bland", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "G" |
| ], |
| "last": "Altman", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "BMJ", |
| "volume": "310", |
| "issue": "6973", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bland, J. M. and Altman, D. G. (1995). Multiple signifi- cance tests: the bonferroni method. BMJ, 310(6973):170.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning sign language by watching tv (using weakly aligned subtitles)", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Buehler", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Everingham", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Buehler, P., Zisserman, A., and Everingham, M. (2009). Learning sign language by watching tv (using weakly aligned subtitles). In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Employing signed tv broadcasts for automated learning of British Sign Language", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Buehler", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Everingham", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 4th Workshop on the Representation and Processing of Sign Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Buehler, P., Everingham, M., and Zisserman, A. (2010). Employing signed tv broadcasts for automated learning of British Sign Language. In Proceedings of the 4th Work- shop on the Representation and Processing of Sign Lan- guages.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Neural sign language translation", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [ |
| "C" |
| ], |
| "last": "Camg\u00f6z", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hadfield", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bowden", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Camg\u00f6z, N. C., Hadfield, S., Koller, O., Ney, H., and Bow- den, R. (2018). Neural sign language translation. In Proceedings of the Conference on Computer Vision and Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Realtime multi-person 2D pose estimation using part affinity fields", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Simon", |
| "suffix": "" |
| }, |
| { |
| "first": "S.-E", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Sheikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cao, Z., Simon, T., Wei, S.-E., and Sheikh, Y. (2017). Real- time multi-person 2D pose estimation using part affinity fields. In Proceedings of the IEEE conference on Com- puter Vision and Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "OpenPose: realtime multi-person 2D pose estimation using Part Affinity Fields", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hidalgo", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Simon", |
| "suffix": "" |
| }, |
| { |
| "first": "S.-E", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Sheikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
"arXiv:1812.08008"
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cao, Z., Hidalgo, G., Simon, T., Wei, S.-E., and Sheikh, Y. (2018). OpenPose: realtime multi-person 2D pose estimation using Part Affinity Fields. In arXiv preprint arXiv:1812.08008.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Large lexicon detection of sign language", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Cooper", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bowden", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the International Workshop on Human-Computer Interaction", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cooper, H. and Bowden, R. (2007). Large lexicon detection of sign language. In Proceedings of the International Workshop on Human-Computer Interaction.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Sign language recognition using sub-units", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Cooper", |
| "suffix": "" |
| }, |
| { |
| "first": "E.-J", |
| "middle": [], |
| "last": "Ong", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Pugeault", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bowden", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "13", |
| "issue": "", |
| "pages": "2205--2231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cooper, H., Ong, E.-J., Pugeault, N., and Bowden, R. (2012). Sign language recognition using sub-units. Jour- nal of Machine Learning Research, 13(Jul):2205-2231.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "How is Music Translated? Mapping the Landscape of Music Translation", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Desblache", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "219--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Desblache, L., (2019). How is Music Translated? Map- ping the Landscape of Music Translation, pages 219-264. Palgrave Macmillan UK, London.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Hamnosys-representing sign language data in language resources and language processing contexts", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hanke", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Workshop on Representation and processing of sign languages", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hanke, T. (2004). Hamnosys-representing sign language data in language resources and language processing con- texts. In Proceedings of the Workshop on Representation and processing of sign languages (LREC 2004).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "MS-ASL: A largescale data set and benchmark for understanding American Sign Language", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "R V" |
| ], |
| "last": "Joze", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the British Machine Vision Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joze, H. R. V. and Koller, O. (2018). MS-ASL: A large- scale data set and benchmark for understanding American Sign Language. In Proceedings of the British Machine Vision Conference.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Neural sign language translation based on human keypoint estimation", |
| "authors": [ |
| { |
| "first": "S.-K", |
| "middle": [], |
| "last": "Ko", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Jung", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Applied Sciences", |
| "volume": "9", |
| "issue": "13", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ko, S.-K., Kim, C. J., Jung, H., and Cho, C. (2019). Neu- ral sign language translation based on human keypoint estimation. Applied Sciences, 9(13):2683.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Deep hand: How to train a cnn on 1 million hand images when your data is continuous and weakly labelled", |
| "authors": [ |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bowden", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koller, O., Ney, H., and Bowden, R. (2016). Deep hand: How to train a cnn on 1 million hand images when your data is continuous and weakly labelled. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "DGS corpus annotation guidelines", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Konrad", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of Digging into Signs Workshop: Developing Annotation Standards for Sign Language Corpora", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Konrad, R. (2015). DGS corpus annotation guidelines. In Proceedings of Digging into Signs Workshop: Developing Annotation Standards for Sign Language Corpora.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Towards continuous sign language recognition with deep learning", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Mocialov", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Turner", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "S" |
| ], |
| "last": "Lohan", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hastie", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of Workshop of Creating Meaning With Robot Assistants", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mocialov, B., Turner, G., Lohan, K. S., and Hastie, H. (2017). Towards continuous sign language recognition with deep learning. In Proceedings of Workshop of Creat- ing Meaning With Robot Assistants (Humanoids 2017).", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Recognition of Isolated Indian Sign Language Gesture in Real Time", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Nandy", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Mondal", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
"C"
| ], |
| "last": "Nandi", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "V V" |
| ], |
| "last": "Vijayakumar", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Debnath", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "C" |
| ], |
| "last": "Stephen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Meghanathan", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Sankaranarayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Thankachan", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "M" |
| ], |
| "last": "Gaol", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "L" |
| ], |
| "last": "Thankachan", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Conference on Information Processing and Management", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nandy, A., Prasad, J. S., Mondal, S., Chakraborty, P., Nandi, G. C.\", e. V. V., Vijayakumar, R., Debnath, N. C., Stephen, J., Meghanathan, N., Sankaranarayanan, S., Thankachan, P. M., Gaol, F. L., and Thankachan, N. (2010). Recog- nition of Isolated Indian Sign Language Gesture in Real Time. In Proceedings of the Conference on Information Processing and Management.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Visual iconicity across sign languages: Large-scale automated video analysis of iconic articulators and locations. Frontiers in psychology", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Ostling", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "B\u00f6rstell", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Courtaux", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "9", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ostling, R., B\u00f6rstell, C., and Courtaux, S. (2018). Visual iconicity across sign languages: Large-scale automated video analysis of iconic articulators and locations. Fron- tiers in psychology, 9:725.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Hand keypoint detection in single images using multiview bootstrapping", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Simon", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Joo", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Sheikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon, T., Joo, H., Matthews, I., and Sheikh, Y. (2017). Hand keypoint detection in single images using multiview bootstrapping. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Very deep convolutional networks for large-scale image recognition", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simonyan, K. and Zisserman, A. (2015). Very deep convo- lutional networks for large-scale image recognition. In Proceedings of the International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A Real-Time Gesture Recognition System for Isolated Swedish Sign Language Signs", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Stefanov", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Beskow", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 4th European and 7th Nordic Symposium on Multimodal Communication (MMSYM)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefanov, K. and Beskow, J. (2017). A Real-Time Gesture Recognition System for Isolated Swedish Sign Language Signs. In Proceedings of the 4th European and 7th Nordic Symposium on Multimodal Communication (MMSYM).", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Text2sign: Towards sign language production using neural machine translation and generative adversarial networks", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Stoll", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "C" |
| ], |
| "last": "Camg\u00f6z", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hadfield", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bowden", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Journal of Computer Vision", |
| "volume": "", |
| "issue": "", |
| "pages": "1--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stoll, S., Camg\u00f6z, N. C., Hadfield, S., and Bowden, R. (2019). Text2sign: Towards sign language production using neural machine translation and generative adversar- ial networks. International Journal of Computer Vision, pages 1-18.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Sign words annotation assistance using Japanese Sign Language words recognition", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Takayama", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Takahashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the International Conference on Cyberworlds", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takayama, N. and Takahashi, H. (2018). Sign words an- notation assistance using Japanese Sign Language words recognition. In Proceedings of the International Confer- ence on Cyberworlds (CW).", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Convolutional pose machines", |
| "authors": [ |
| { |
| "first": "S.-E", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Ramakrishna", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Kanade", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Sheikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei, S.-E., Ramakrishna, V., Kanade, T., and Sheikh, Y. (2016). Convolutional pose machines. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Large scale sign language interpretation", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sah", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ananthanarayana", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bhat", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Gandhi", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Ptucha", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--5", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuan, T., Sah, S., Ananthanarayana, T., Zhang, C., Bhat, A., Gandhi, S., and Ptucha, R. (2019). Large scale sign language interpretation. In Proceedings of the 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pages 1-5. IEEE.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "CopyCat: An American Sign Language game for deaf children", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zafrulla", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Brashear", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Presti", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hamilton", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Starner", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the International Conference on Automatic Face and Gesture Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "647--647", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zafrulla, Z., Brashear, H., Presti, P., Hamilton, H., and Starner, T. (2011). CopyCat: An American Sign Lan- guage game for deaf children. In Proceedings of the In- ternational Conference on Automatic Face and Gesture Recognition, pages 647-647, March.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Adaptive sign language recognition with exemplar extraction and map/ivfs", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "IEEE signal processing letters", |
| "volume": "17", |
| "issue": "3", |
| "pages": "297--300", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhou, Y., Chen, X., Zhao, D., Yao, H., and Gao, W. (2009). Adaptive sign language recognition with exemplar ex- traction and map/ivfs. IEEE signal processing letters, 17(3):297-300.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "num": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "type_str": "table", |
| "text": "Screenshots of the collected online data for three songs by three different artists with English spoken language interpreted by six different signers, three signers per sign language", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF6": { |
| "num": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>: Location/orientation relative frequencies for each</td></tr><tr><td>video for each sign language</td></tr></table>", |
| "html": null |
| } |
| } |
| } |
| } |