| { |
| "paper_id": "O03-3003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:01:25.275340Z" |
| }, |
| "title": "Restoration of Case Information in All-Cap English Broadcast Transcription", |
| "authors": [ |
| { |
| "first": "Yu-Ting", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Tsing Hua University", |
| "location": { |
| "addrLine": "101, Kuangfu Road", |
| "postCode": "300", |
| "settlement": "Hsinchu", |
| "country": "Taiwan, ROC" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jian-Chen", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Tsing Hua University", |
| "location": { |
| "addrLine": "101, Kuangfu Road, Hsinchu, 300", |
| "country": "Taiwan, ROC" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "", |
| "pdf_parse": { |
| "paper_id": "O03-3003", |
| "_pdf_hash": "", |
| "abstract": [], |
| "body_text": [ |
| { |
| "text": "g904374@cs.nthu.edu.tw", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The local broadcast ICRT (International Community Radio Taipei) in Taipei has their news scripts sent to their listeners in ALL CAPS, which makes the articles more difficult to read. Therefore, we think it may facilitate the readers if we transform the text into normal cases that we are familiar with. In this prototype system, we established a practical method of restoration of case information, using different techniques from NLP and statistics. The system can apply many different kinds of approaches; however, in this prototype, we focus our analysis and test data on broadcast transcription.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Basically, our research involves: Establishing a very large database containing numerous vocabularies and using it as our training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Obtaining text from ICRT news scripts sent by e-mails as our test data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Restoring the cases of the contents into cases that we are more acquainted with.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Handling some exceptions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Establishing a very large database. Our training data comes from VOA news, which consists of 9138 articles, 3 million words in total. For each article, we segment its contents into individual words and calculate their n-gram probabilities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Obtaining text from ICRT news scripts. We perform similar piecing process on the news scripts. After obtaining each isolated word, we query its probabilities in unigram, bigram, and trigram probabilities, which have two, four, and eight values respectively from our training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Restoring the cases of the contents. After accomplishing Viterbi algorithm (Rabiner, 1989) to compute the highest probability and its P-model value (Lucian Vlad Lita, 2003) , we acquire the best restoration of case for each word, and then we alter the texts. We have an example in Figure 1 , an original text from one of the ICRT news scripts and the text after restoration. wash. that appears to be a major shift in the military's relations with the news media. Again, we found some adjustments have to be done, and the first letter of the first word in a sentence ought to be in upper case is one of them. Even so, we have to ask ourselves, \"What is a sentence?\" Is it something ends up with a period, an exclamation mark, or a question mark?", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 90, |
| "text": "(Rabiner, 1989)", |
| "ref_id": null |
| }, |
| { |
| "start": 148, |
| "end": 172, |
| "text": "(Lucian Vlad Lita, 2003)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 289, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Apparently, we can find a counter example with \"U.S.\". Here we use heuristic sentence boundary detection algorithm to determine what a sentence is and capitalized the first words in a sentence as shown in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 205, |
| "end": 213, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "After restoration: The U.S. military issued a public apology to the people of a Shiite Muslim neighborhood in Baghdad on Thursday for an incident in which a man was killed and four others wounded after an American Black Hawk helicopter blew down an Islamic banner with its rotor wash. That appears to be a major shift in the military's relations with the news media. Our demonstration model shows we can convert all-cap English news scripts quite well. There are some possible improvements, and our future work includes improving our performance, which can reduce the time we spend on transforming the text. Also, we plan to create a macro in Outlook so that if the readers receive their e-mails from ICRT with Outlook, they may have the restoration done by running a macro. We look forward to readers finding this tool useful and somewhat convenient.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We acknowledge the support of NSC under contract number: 92-2815-C-007-004-E. Many thanks are due to Dr. Jason S. Chang for his guidance in NLP and ICRT for their news scripts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Teaching a Weaker Classifier: Named Entity Recognition on Upper Case Text", |
| "authors": [ |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Leong", |
| "suffix": "" |
| }, |
| { |
| "first": "Chieu", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Hwee Tou Ng", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hai Leong Chieu, Hwee Tou Ng, 2002 Teaching a Weaker Classifier: Named Entity Recognition on Upper Case Text.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A Knowledge-free Method for Capitalized Word Disambiguation", |
| "authors": [ |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Mikheev", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrei Mikheev, 1999 A Knowledge-free Method for Capitalized Word Disambiguation.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Fuzzy Typing for Document Management", |
| "authors": [ |
| { |
| "first": "Alison", |
| "middle": [], |
| "last": "Huettner", |
| "suffix": "" |
| }, |
| { |
| "first": "Pero", |
| "middle": [], |
| "last": "Subasic", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alison Huettner, Pero Subasic, 2000 Fuzzy Typing for Document Management.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Foundations of statistical natural language processing", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Schutze", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "123--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning and Hinrich Schutze. Foundations of statistical natural language processing, 2000, pp. 123-136", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "An example of upper case text and restoration. Handling exceptions. Actually, the word 'Sampson' was not found in our training data, however, we assume unknown words as proper nouns and therefore we capitalize its first letter. Here we have another experiment in Figure 2." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Another example of upper case text and restoration" |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "The previous example after sentence adjustment" |
| } |
| } |
| } |
| } |