| { |
| "paper_id": "Y11-1010", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:39:18.239343Z" |
| }, |
| "title": "Automatic Wrapper Generation and Maintenance", |
| "authors": [ |
| { |
| "first": "Yingju", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LTD. 15F Tower A", |
| "institution": "", |
| "location": { |
| "addrLine": "No.56 Dong Si Huan Zhong Rd", |
| "postCode": "100025", |
| "settlement": "Chaoyang District, Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "yjxia@cn.fujitsu.com" |
| }, |
| { |
| "first": "Yuhang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LTD. 15F Tower A", |
| "institution": "", |
| "location": { |
| "addrLine": "No.56 Dong Si Huan Zhong Rd", |
| "postCode": "100025", |
| "settlement": "Chaoyang District, Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Fujiang", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LTD. 15F Tower A", |
| "institution": "", |
| "location": { |
| "addrLine": "No.56 Dong Si Huan Zhong Rd", |
| "postCode": "100025", |
| "settlement": "Chaoyang District, Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Shu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LTD. 15F Tower A", |
| "institution": "", |
| "location": { |
| "addrLine": "No.56 Dong Si Huan Zhong Rd", |
| "postCode": "100025", |
| "settlement": "Chaoyang District, Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "LTD. 15F Tower A", |
| "institution": "", |
| "location": { |
| "addrLine": "No.56 Dong Si Huan Zhong Rd", |
| "postCode": "100025", |
| "settlement": "Chaoyang District, Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper investigates automatic wrapper generation and maintenance for Forums, Blogs and News web sites. Web pages are increasingly dynamically generated using a common template populated with data from databases. This paper proposes a novel method that uses tree alignment and transfer learning method to generate the wrapper from this kind of web pages. The tree alignment algorithm is adopted to find the best matching structure of the input web pages. A kind of linear regression method is employed to get the weight of different tag-matching. A transfer learning method is adopted to find the most likely content block. A wrapper built on the most probable content block and the repeating patterns extracts data from web pages. The wrapper maintenance arises because web source may experience changes that invalidate the current wrappers. This paper presents a wrapper maintenance method using a log likelihood ratio test for detecting the change points on the similarity series which is obtained from the wrapper and input web pages. The wrapper generation method is applied to generate a wrapper once the web source change is detected. Experimental results show that the method achieves high accuracy and has steady performance.", |
| "pdf_parse": { |
| "paper_id": "Y11-1010", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper investigates automatic wrapper generation and maintenance for Forums, Blogs and News web sites. Web pages are increasingly dynamically generated using a common template populated with data from databases. This paper proposes a novel method that uses tree alignment and transfer learning method to generate the wrapper from this kind of web pages. The tree alignment algorithm is adopted to find the best matching structure of the input web pages. A kind of linear regression method is employed to get the weight of different tag-matching. A transfer learning method is adopted to find the most likely content block. A wrapper built on the most probable content block and the repeating patterns extracts data from web pages. The wrapper maintenance arises because web source may experience changes that invalidate the current wrappers. This paper presents a wrapper maintenance method using a log likelihood ratio test for detecting the change points on the similarity series which is obtained from the wrapper and input web pages. The wrapper generation method is applied to generate a wrapper once the web source change is detected. Experimental results show that the method achieves high accuracy and has steady performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Web-based information is typically formatted to be read by human users, not by computer applications. Information agents are being proposed which automatically extract information from multiple websites. Data is typically extracted from web sources by writing specialized programs, called wrappers (Laender et al. 2002) , which identify data of interest and map them to a suitable format.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 319, |
| "text": "(Laender et al. 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many approaches have been reported in the literature for wrapper generation. Detailed discussions of various approaches can be found in several surveys (Chang et al., 2006; Laender et al., 2002) .", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 172, |
| "text": "(Chang et al., 2006;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 173, |
| "end": 194, |
| "text": "Laender et al., 2002)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Early approaches were based on manual techniques (Atzeni and Mecca, 1997; Crescenzi and Mecca, 1998; Huck et al., 1998; Sahuguet and Azavant, 1999) . By observing a web page and its source code, the programmer finds some patterns from the page and then writes a program to extract data from the web pages. A key problem with manually coded wrappers is that writing them is a difficult and labor-intensive task, and tends to be brittle and difficult to maintain.", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 73, |
| "text": "Mecca, 1997;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 74, |
| "end": 100, |
| "text": "Crescenzi and Mecca, 1998;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 101, |
| "end": 119, |
| "text": "Huck et al., 1998;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 120, |
| "end": 147, |
| "text": "Sahuguet and Azavant, 1999)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Other approaches have some degrees of automation. In semi-automatic approaches (Cohen et al., 2002; Irmak and Suel, 2006; Kushmerick, 2000; Muslea et al., 1999; Pinto et al., 2003; Wang and Hu, 2002; Zheng et al., 2007) , a set of extraction rules are learnt from a set of manually labeled pages or data records. These rules are then used to extract data items from similar pages. This method still requires substantial manual efforts.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 99, |
| "text": "(Cohen et al., 2002;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 100, |
| "end": 121, |
| "text": "Irmak and Suel, 2006;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 122, |
| "end": 139, |
| "text": "Kushmerick, 2000;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 140, |
| "end": 160, |
| "text": "Muslea et al., 1999;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 161, |
| "end": 180, |
| "text": "Pinto et al., 2003;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 181, |
| "end": 199, |
| "text": "Wang and Hu, 2002;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 200, |
| "end": 219, |
| "text": "Zheng et al., 2007)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In automatic methods, Arasu and Hector (2003) , Chang and Liu (2001) and Crescenzi et al. (2001) found patterns or grammars from multiple pages containing similar data records. Wang and Lochovsky (2003) treated the input pages as strings and employed an algorithm to discover the continuously repeated substrings using suffix trees. Lerman et al. (2004) utilized the detailed data in the page behind the current page to identify data records. Simon and Lausen (2005) identified and ranked potential repeated patterns using visual features. Then matched subsequences of the pattern with the highest weight were aligned with global multiple sequence alignment techniques.", |
| "cite_spans": [ |
| { |
| "start": 22, |
| "end": 45, |
| "text": "Arasu and Hector (2003)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 48, |
| "end": 68, |
| "text": "Chang and Liu (2001)", |
| "ref_id": null |
| }, |
| { |
| "start": 73, |
| "end": 96, |
| "text": "Crescenzi et al. (2001)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 177, |
| "end": 202, |
| "text": "Wang and Lochovsky (2003)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 333, |
| "end": 353, |
| "text": "Lerman et al. (2004)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 453, |
| "end": 466, |
| "text": "Lausen (2005)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Several methods were presented to address the wrapper maintenance problem. Kushmerick (1999) defined a problem called \"wrapper verification,\" which checks if a wrapper stops extracting correct data. Their proposed solution analyzes pages and extracted information, and detects the page changes. If the pages have changed, the designer is notified, so that she can relearn the wrapper from the pages with the new structure. Lerman et al. (2003) developed a method for repairing wrappers in the case of small mark-up changes. Chidlovskii (2001) presented an automatic maintenance approach to repairing wrappers under the assumption that there are only small changes. Raposo et al.(2005) made wrappers collect some results from valid queries during their operation, and when the source changes, use those results to generate a new training set of labeled examples to bootstrap the wrapper induction process again. Meng et al. (2003) presented a schema-guided approach which is based on the observation that despite various page changes, many important features of the pages are preserved, such as syntactic patterns, annotations, and hyperlinks of the extracted data items. Their approach uses these preserved features to identify the locations of the desired values in the changed pages, and repair wrappers correspondingly by inducing semantic blocks from the HTML tree.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 92, |
| "text": "Kushmerick (1999)", |
| "ref_id": null |
| }, |
| { |
| "start": 423, |
| "end": 443, |
| "text": "Lerman et al. (2003)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 524, |
| "end": 542, |
| "text": "Chidlovskii (2001)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 665, |
| "end": 684, |
| "text": "Raposo et al.(2005)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 911, |
| "end": 929, |
| "text": "Meng et al. (2003)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Those previous methods focused on the list pages (Each of such pages contains lists of objects, for example, the pages in the shopping website such as Amazon.com.). This kind of web pages can be retrieved using queries which enable the \"wrapper verification\" procedure. But for the web pages from News, Forum and Blogs, the \"wrapper verification\" approach cannot be utilized because these web pages cannot be retrieved using queries and thus no valid training set can be provided for the wrapper maintenance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper presents a method that uses tree alignment to automatically build wrapper from web pages coming from News, Forums and Blogs websites. A kind of linear regression method is proposed to get the weight of different tag-matching. Based on the alignment, we merge the trees into one union tree whose nodes record the statistical information gotten from multiple web pages. We use a transfer learning method to find the most likely content block and use the alignment algorithm to detect the repeat patterns on the union tree. A log likelihood ratio test is adopted to the wrapper maintenance. Because the likelihood ratios describe evidence rather than embody a decision, they can easily be adapted to the various goals for which inferential statistics might be used. The likelihood ratios provide an intuitive approach to summarizing the evidence provided by an experiment in wrapper maintenance scenario.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For the wrapper generation, in the sense of techniques used, the most relevant approaches are (Zhai and Liu, 2005; Zigoris et al., 2006) . Zhai and Liu(2005) used partial alignment method to align and extract data items from the identified data records. Zigoris et al.(2006) used Support Vector Machines (SVM) for learning the tree alignment parameters. With well-tuned parameters these models are resilient.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 114, |
| "text": "(Zhai and Liu, 2005;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 115, |
| "end": 136, |
| "text": "Zigoris et al., 2006)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 139, |
| "end": 157, |
| "text": "Zhai and Liu(2005)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 254, |
| "end": 274, |
| "text": "Zigoris et al.(2006)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Compared with these methods, the wrapper generation method proposed in this study presents a kind of linear regression method to get the weight of different tag-matching. The algorithm is dedicated to adopt different node features and different matching weights while the others haven't taken into account the categories of html tags, the properties of different level nodes and the text features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Another major difference between the proposed method and the previous works is the way the alignment algorithm and the statistics are used. Zhai and Liu(2005) used the alignment algorithm to align the data items (data fields) from the identified data records. A link was created when a matching was found. The method proposed in this paper utilizes the alignment algorithm to obtain skeleton of the input trees, merges the trees into one union tree and records the statistical information. The proposed method employed the most probable content block finding step to locate the content blocks. The statistics recorded in the union tree makes this step more accurate because the heuristic is often used to differentiate the content from junk information.", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 158, |
| "text": "Zhai and Liu(2005)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We present a transfer learning method to get the weight of each feature when finding the most probable content block. The proposed method gets steady performance due to the statistics used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For the wrapper maintenance, as mentioned in Section 1, the previous methods focused on the list pages. The \"wrapper verification\" approach cannot be utilized on the web pages from News, Forums and Blogs. There are no literature considering the wrapper maintenance on News, Forums and Blogs websites. In this paper, a log likelihood ratio test is adopted to wrapper maintenance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There are several steps in the proposed method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(1) Wrapper generation. We use the tree alignment methods to calculate the similarity between input web pages and build a wrapper on tree alignment results. The tree alignment method is also used to calculate the similarity between wrapper and the input web pages. The input trees are merged into one union tree whose nodes record the statistical information such as the times a node has been aligned, the text length of the node. A heuristic method is employed to find the most probable content block. The alignment algorithm is utilized again to detect the repeating patterns on the union tree. The wrapper is generated based on the most probable content block and the repeating patterns.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(2) Similarity series generation. A similarity series was built by calculating the similarity between the input web pages and the current wrapper using the tree alignment algorithm proposed in this paper. The similarity series is in the order of the input web pages' timestamp.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(3) Change point detection and wrapper regeneration. A log likelihood ratio test is utilized to detect the change points on the similarity series. The wrapper generation method is applied again to generate a wrapper once a change point is detected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this study, we are interested in one specific type of tree called labeled ordered rooted tree. A rooted tree is a tree whose root vertex is fixed. Ordered rooted trees are rooted trees in which the relative order of the children is fixed for each vertex. We use the tree edit distance to evaluate the structural similarities between Web pages. In its traditional formulation, the tree edit distance problem considers three operations: node removal, node insertion and node replacement. The solution of this problem consists in determining the minimal set of operations to transform one tree into another. Another equivalent formulation of this problem is to discover a mapping with minimum cost between the two trees.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this work, we focus on setting the weight (cost) of different node mapping (tag-matching). One of the major contributions of our work is a kind of linear regression method for getting the weight of different tag-matching. Another contribution of our work is the way we use the similarity between trees and the transfer learning method which is used for finding the most likely content block.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The main problem of the previous method is that they did not consider about employing different weights for various tag-matching. For example, the HTML tags are divided into two categories: block elements and inline elements. The block elements are elements that usually, but not always, contain other elements. They normally act as containers of some sort. The inline elements normally mark up the semantic meaning of something. Furthermore, the level of the different nodes should also be considered. The higher-level nodes should have higher weight as the higher-level nodes usually act as bigger structure block. Different weight should be assigned to different type of tag-matching.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this study, a kind of linear regression method is employed to get the weight of different tag-matching. First, we found a collection of similar web pages belong to the same \"class\" (The web pages share the common format and layout characteristics, usually generated with the same template, for example, the web pages of the same board in one Forum website). It's feasible to get this kind of web pages collection automatically. Next, we will use this web pages collection for getting the optimal weighting schema.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Let w i be the weight of tag-matching and w i > w j for i < j. Let D mn be the sum of the gains in the best alignment between the trees T m and T n .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "D_{mn} = \\sum_i w_i t_i^{mn} (1) where t_i^{mn} is the number of times w_i occurs in the alignment procedure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The sum of the gains in the collection is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "f = \\sum_{m,n} D_{mn} = \\sum_{m,n} \\sum_i w_i t_i^{mn} = \\sum_i w_i \\sum_{m,n} t_i^{mn}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(2) Because the collection is the similar web pages belonging to the same \"class\", a set of w i is selected which makes the maximum f.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To get , a constraint is added. The group of equations is rewritten as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(3) The solution of the above equations is used as the weight of each type of tag-matching (w i ). Figure 1 illustrates an example of the weight setting method. For one collection of similar web pages belong to the same \"class\", we calculate the sum of the alignment gains (or the similarity) for each weighting schema. The best weighting schema is the one maximize the sum of the gains. That means to find a set of w i that output the maximum f in the equations (3). ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 99, |
| "end": 107, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatically getting tag-matching weight", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Using the alignment algorithm, we can find whether a node has been aligned. We then merge the two trees into one union tree and record the alignment information in each node. After processing several trees, we can use the statistic such as the ratio of the times a node has been aligned to make the decision whether this node should be kept or not. The union tree becomes more compact after deleting some useless nodes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The next step is finding the most probable content block (the data in the content block is what we want to extract). In general cases, there is one content block in news page and several content blocks in forum and blog page. The content block detecting method is shown below:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(4) Where, f i is the feature and w i is its weight. There are many heuristic features (Christian , 2009) such as the variance of the text length of nodes, the ratio of the length of the link to the length of the text in the node, the ratio of the fixed text length and number of stop words inside the DOM node.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 105, |
| "text": "(Christian , 2009)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The remained issue is how to get the weight of each feature. Since there are three related types: News, Forums and Blogs. We can consider this problem as transfer learning (Jing, 2009) . We are interested in getting the weight of target webpage type T and we have labeled instance for K auxiliary type A 1 , \u2026, A k . Let k w denote the weight vector of the linear classifier for the auxiliary type A k and T w denote the weight vector for the target type T. we now assume that these weight vectors are related through a common component v: , , for k = 1,2, \u2026, K If we assume that only weight of certain general features can be shared between different web page types, we can force certain dimensions of v to be 0. We use a square matrix F and set Fv=0. The entries of F are set to 0 except that F i,i =1 if we want to force v i =0.", |
| "cite_spans": [ |
| { |
| "start": 172, |
| "end": 184, |
| "text": "(Jing, 2009)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Now we can learn these weight vectors in a transfer learning framework. Let x represents the feature vector of a candidate web page, and y \u2208 {+1, -1} represent a class label. Let", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "T N i T i T i T y x D 1 )} , {( \uf03d \uf03d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "denote the set of labeled instances for the target type T. Let", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "k N i k i k i k y x D 1 )} , {( \uf03d \uf03d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "denotes the labeled instance for the auxiliary type A k .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We learn the optimal weight vectors", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u03a4 K k k \uf06d \uf06d, } { 1 \uf03d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "and v by optimizing the following objective function:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "] ) , ( ) , ( [ ) , , } ({ 1 2 2 2 1 0 , , }, { 1 min arg \uf0e5 \uf0e5 \uf03d \uf03d \uf03d \uf03d \uf02b \uf02b \uf02b \uf02b \uf02b \uf02b \uf03d K k v k k T T K k k k T T Fv v \u03a4 K k k v v D L v D L v \u03a4 k \uf06c \uf06d \uf06c \uf06d \uf06c \uf06d \uf06d \uf06d \uf06d \uf06d \uf06d \uf06d \uf06d (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Once we get the most probable content, we use the alignment algorithm to find the repeat patterns. We first split the union tree into several subtrees according to the content block nodes. The alignment algorithm is used to measure the similarity between subtrees. Also in the alignment, we consider about node's weight according to the level and category. This step is especially useful for the web pages coming from Forums and Blogs. In the News web pages, the content block itself is usually used as the extracting pattern.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Thus, by alignment, merging, finding content block and mining the repeat patterns, we can get a wrapper to extract the data from web pages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transfer learning method for the most probable content block detecting", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The wrapper maintenance arises because the template of the web source may experience changes that invalidate the current wrappers. Figure 2 shows an example of the template change detection. The x-axis shows the web pages of one website ordered by the timestamp. Let time(i) be the time of the webpage i, then time(i)<time(j) for i<j. The y-axis shows the similarities between the current wrapper and the input web pages. The similarities are calculated using the tree alignment algorithm presented in this paper. The website's template changed at tc which causes the low similarities between the current wrapper and the web pages after tc. This means that the wrapper should also change due to the change of the website's template. In this scenario, the wrapper maintenance includes two steps. The first step is the detection of the change points in the similarity series. The second step is repairing or re-generating the wrapper using the web pages after tc. We use the log likelihood ratio test (Zeitouni et al., 1992) to detect the change points. Because the likelihood ratios describe evidence rather than embody a decision, they can easily be adapted to the various goals for which inferential statistics might be used. The likelihood ratios provide an intuitive approach to summarizing the evidence provided by an experiment in the wrapper maintenance scenario. Let p be the actual distribution of the similarity series, p_{\\theta_0} be the distribution under H_0, and p_{\\theta_1} be the distribution under H_1. We use y_i to denote the similarity between the i-th web page and the current wrapper. Let us introduce the following hypotheses: ", |
| "cite_spans": [ |
| { |
| "start": 999, |
| "end": 1022, |
| "text": "(Zeitouni et al., 1992)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 131, |
| "end": 139, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": ") , , | ( ) , , | ( 1 1 1 1 0 y y y p y y y p i i i i \uf04c \uf04c \uf02d \uf02d \uf03d \uf071 H 1 : There is a time t c such that For ) , , | ( ) , , | ( : 1 1 1 1 1 1 0 y y y p y y y p t i i i i i c \uf04c \uf04c \uf02d \uf02d \uf03d \uf02d \uf0a3 \uf0a3 \uf071 For ) , , | ( ) , , | ( : 1 1 1 c c t i i t i i c y y y p y y y p k i t \uf04c \uf04c \uf02d \uf02d \uf03d \uf0a3 \uf0a3 \uf071 (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We use the log of likelihood ratio ) (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": ") ( 0 1 i i i y L y L ln S \uf071 \uf071 \uf03d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "to indicate the relative probability of the data. To simplify, suppose that y i are independent and normally distributed with common variance", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "2 \uf073 . Consider ) , ( 2 \uf073 \uf06d N y i . 0 \uf06d \uf06d \uf03d under H 0 1 \uf06d \uf06d \uf03d under H 1 We get \uf0e5 \uf03d \uf02d \uf02d \uf03d k j i i k j v y b S ) 2 ( 0 \uf06d \uf073 Where: \uf073 \uf06d \uf06d \uf06d \uf06d 0 1 0 1 , \uf02d \uf03d \uf02d \uf03d b v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Hence, we get the following hypotheses:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "H 0 : ) , 2 ( 2 2 2 mb mv N S k j \uf073 \uf02d H 1 : ) , 2 ( 2 2 2 mb mv N S k j \uf073 (9)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The problem becomes testing whether k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "j S is ) , 2 ( 2 2 2 mb mv N \uf073 \uf02d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": ". If H 1 holds, a change point is reported at j-th web pages. Take Figure 6 as an example, we use a window (size: m=(tn-ta)), and calculate the m j j S \uf02b . The H 1 holds at i+m will raise an alarm at i. for example, the H 1 holds at tn and raises an alarm at ta. There is (ta-tc) delay since the change point actually occurs at tc. In this paper, different window sizes were set to evaluate the change point detection performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 67, |
| "end": 75, |
| "text": "Figure 6", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As shown above, we assume yi are independent and normally distributed with common variance 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\uf073 . This tends to be less flexible since it relies on a model assumption. In the community of statistics, some non-parametric density estimation is used for calculating the likelihood ratio (Brodsky and Darkhovsky, 1993) . However, non-parametric density estimation is known to be a hard problem (Hardel et al. 2004; Huang et al., 2007) , it may not be promising in the practice. The experimental results in Section 3 have shown this. One way to alleviating this difficulty is to directly estimate the ratio of probability densities, not the probability densities themselves. Recently, direct density-ratio estimation has been actively explored in the machine learning and the Kullback Leibler Importance Estimation Procedure (KLIEP) (Sugiyama et al., 2008) . We also use the KLIEP and give the experimental results in Section 5.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 220, |
| "text": "(Brodsky and Darkhovsky, 1993)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 296, |
| "end": 316, |
| "text": "(Hardel et al. 2004;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 317, |
| "end": 336, |
| "text": "Huang et al., 2007)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 734, |
| "end": 757, |
| "text": "(Sugiyama et al., 2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Wrapper Maintenance", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The wrapper generation approach is compared with the Zhai and Liu(2005) (PA in Table 1 ) and Zigoris et al.(2006) (SVM in Table 1 ). For each web page category (News, Forum and Blog), 40 websites and 6000 web pages were selected for the experiments.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 113, |
| "text": "Zigoris et al.(2006)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 114, |
| "end": 121, |
| "text": "(SVM in", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 79, |
| "end": 86, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 122, |
| "end": 129, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Generation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The experimental results are shown in Table 1 . PA performs better in the news websites than in the forum and blog while our method gets the best performance in blogs. It\"s because that PA method is more suitable for news websites. The experimental results also showed that the transfer learning for tree alignment parameters outperform the SVM method. We evaluated the system performance under different size of union tree. Figure 3 shows the experimental results. Here the \"news_p\" and \"news_r\" are the precision and recall gotten from the news corpus respectively, and \"forum_p\" and \"forum_r\" are the precision and recall gotten from the forum corpus respectively. The x axis shows number of the trees merged into the union tree. We can see that the system tend to convergence while the number is about 30. That means we can use about 30 samples to build the union tree and to get the extraction template, which will save much time.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 38, |
| "end": 45, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 425, |
| "end": 433, |
| "text": "Figure 3", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Generation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We evaluated the impact of the size of the collection for getting the tag-matching weight. From the Figure 4 , we can see that the system performance better on the blog corpus again. We should notice that the weight setting procedure needs more samples than the union tree building (50 web pages in the forum and blog corpus, 80 web pages in the news corpus). Since the weight setting is time-consuming (C n 2 tree alignment on the collection of n web pages), we think it\"s better to get them in advance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 100, |
| "end": 108, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Generation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We also evaluated the annotated samples needed for the transfer learning method. The experiment results are shown in Figure 5 . We can see that the more the annotated samples, the better the system performance. The system tends to convergence while the number of the annotated sample is about 200. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 117, |
| "end": 125, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Generation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In order to evaluate the effectiveness of the proposed wrapper maintenance approach, we monitored a set of websites for several months. Our experiment was designed to model the scenario in which the goal is to detect whether a site's template has changed. For a fixed site, we got a sequence (p 1 , p 2 , ..., p n ) of gathered pages. The wrapper maintenance system should return TRUE iff the site's template changes at p i which means w i-1 \u2260 w i . Where the w i is the wrapper for p i . We measured the performance in term of 2*2 matrix shown in Table 2 . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 548, |
| "end": 555, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Maintenance", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "w i-1 =w i w i-1 \u2260w i Predict TRUE n1 n2 Predict FALSE n3 n4", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Maintenance", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Several metrics are derived from this matrix. We manually made some data for wrapper maintenance evaluation because the real data is not enough (26, 30 and 35 for News, Forums and Blogs respectively). We made some test data manually. We change the template of website and put data in it to make some new pages. These kinds of data make the wrapper maintenance problem more difficult. We got 100 data sets for each type of websites (News, Forums and Blogs) including the real data gathered. Experimental results are shown in Figure 6 . We can see that the performance on the merged test set (News_M, Forum_M, Blog_M) was worse than that on the real test set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 524, |
| "end": 532, |
| "text": "Figure 6", |
| "ref_id": "FIGREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Maintenance", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "As shown in Section 4, there are parametric and non-parametric density estimation method and direct density-ratio estimation method for the log likelihood ration test. We compared these methods and got the experimental results shown in the Figure 7, 8 and 9 . Here, the \"LLR-Norm\" means the parametric method using normal distribution, \"LLR-Non\" means the non-parametric method and \"KLIEP\" means the direct density-ratio estimation method shown in (Sugiyama et al., 2008) . The experimental results show that the LLR-Norm method outperforms the LLR-Non and KLIEP. As mentioned in Section 4, non-parametric density estimation is known to be a hard problem (Hardel et al. 2004; Huang et al., 2007) , it may not be promising in the practice. The reason why the KLIEP is worse than LLR-Norm is that it is a batch algorithm and not suitable for the change point detection. The experiments follow the principles of Occam's Razor. In many cases, the simplest solution is usually the correct one. ", |
| "cite_spans": [ |
| { |
| "start": 448, |
| "end": 471, |
| "text": "(Sugiyama et al., 2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 655, |
| "end": 675, |
| "text": "(Hardel et al. 2004;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 676, |
| "end": 695, |
| "text": "Huang et al., 2007)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 240, |
| "end": 257, |
| "text": "Figure 7, 8 and 9", |
| "ref_id": "FIGREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation of Wrapper Maintenance", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In this work, a method that uses tree alignment and transfer learning method is proposed to generate the wrapper from web pages of Forums, Blogs and News web sites. The tree alignment algorithm is adopted to find the best matching structure of the input web pages. A kind of linear regression method is employed to get the weight of different tag-matching. Based on the alignment, we merge the trees into one union tree whose nodes record the statistical information gotten from multiple web pages. We use a transfer learning method to find the most likely content block and use the alignment algorithm to detect the repeat patterns on the union tree. In the wrapper maintenance approach, the tree alignment algorithm is utilized as a metric of the similarity between wrapper and web pages. A log likelihood ratio test is adopted to detect the change points on the similarity series. The wrapper generation method is applied again to generate a wrapper once the web source change is detected. This joint wrapper generation and maintenance method can be applied to the kind of websites whose web pages are dynamically generated using a common template populated with data from databases. Experimental results show that the method achieves high accuracy and has steady performance. There are several important issues remaining to be addressed in the future work. It is important to make the most probable content block detecting more accurately and thus enhance the repeating pattern mining procedure. In wrapper maintenance problem, it\"s a challenge to explore using fewer samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Works", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Extracting structured data from Web pages", |
| "authors": [ |
| { |
| "first": "Arasu", |
| "middle": [], |
| "last": "Hector", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "M" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 2003 ACM SIGMOD international conference", |
| "volume": "", |
| "issue": "", |
| "pages": "337--348", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arasu A and Hector GM. 2003. Extracting structured data from Web pages. Proceedings of the 2003 ACM SIGMOD international conference. San Diego, California. PP337-348.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Cut and Paste", |
| "authors": [ |
| { |
| "first": "Atzeni P", |
| "middle": [], |
| "last": "Mecca", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the sixteenth ACM SIGACT-SIGMOD-SIGART symposium", |
| "volume": "", |
| "issue": "", |
| "pages": "144--153", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Atzeni P and Mecca G. 1997. Cut and Paste. Proceedings of the sixteenth ACM SIGACT- SIGMOD-SIGART symposium. Tucson, Arizona, United States. PP144-153.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "IEPAD: Information extraction based on pattern discovery", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "L" |
| ], |
| "last": "Lui", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 10th World Wide Web. Hong Kong", |
| "volume": "", |
| "issue": "", |
| "pages": "681--688", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang C. and Lui SL. 2001. IEPAD: Information extraction based on pattern discovery. Proceedings of the 10th World Wide Web. Hong Kong. PP681-688.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Nonparametric Methods in Change-Point Problems", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Brodsky", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Darkhovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brodsky B and Darkhovsky B. Nonparametric Methods in Change-Point Problems, Kluwer Academic Publishers, 1993.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A survey of Web information extraction systems", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "H" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kayed", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "R" |
| ], |
| "last": "Girgis", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Shaalan", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "IEEE Transactions On Knowledge and Data Engineering", |
| "volume": "", |
| "issue": "", |
| "pages": "1411--1428", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang CH, Kayed M, Girgis MR, Shaalan K. 2006. A survey of Web information extraction systems. IEEE Transactions On Knowledge and Data Engineering, 2006, pp1411-1428", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Automatic repairing of web wrap-pers", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Chidlovskii", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 3rd international workshop on Web information and data management", |
| "volume": "", |
| "issue": "", |
| "pages": "24--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chidlovskii B. 2001. Automatic repairing of web wrap-pers. Proceedings of the 3rd international workshop on Web information and data management. Atlanta, Georgia, USA, 9 November 2001, PP24-30", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A flexible learning system for wrapping tables and lists in HTML documents", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hurst", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jensen", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 11th inter-national conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "232--241", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cohen W, Hurst M, and Jensen L. 2002. A flexible learning system for wrapping tables and lists in HTML documents. Proceedings of the 11th inter-national conference on World Wide Web. Honolulu, Hawaii, USA. PP232-241.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Grammars have ex-ceptions", |
| "authors": [ |
| { |
| "first": "Crescenzi V", |
| "middle": [], |
| "last": "Mecca", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Information Systems", |
| "volume": "23", |
| "issue": "8", |
| "pages": "539--565", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Crescenzi V and Mecca G. 1998. Grammars have ex-ceptions. Information Systems, 23(8), PP539-565", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Roadrunner: Towards automatic data extraction from large web sites", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Crescenzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mecca", |
| "middle": [ |
| "G" |
| ], |
| "last": "Merialdo", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "proceedings of the 26th International Conference on Very Large Database Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "109--118", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Crescenzi V, Mecca G and Merialdo P. 2001. Roadrunner: Towards automatic data extraction from large web sites. In proceedings of the 26th International Conference on Very Large Database Systems. Rome, Italy, 2001. PP 109-118.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A Densitometric Analysis of Web Template Content", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Christian", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1165--1166", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian K. 2009. A Densitometric Analysis of Web Template Content. WWW 2009, April 20- 24, 2009, Madrid, Spain. PP 1165-1166", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Non-parametric and Semi-parametric Models", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Hardel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Muller", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sperlich", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Werwatz", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Springer Series in Statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hardel W, Muller M, Sperlich S and Werwatz A. Non-parametric and Semi-parametric Models, Springer Series in Statistics, Springer, Berlin, 2004", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Jedi: extracting and synthesizing information from the web", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Frankhause", |
| "suffix": "" |
| }, |
| { |
| "first": "Aberer", |
| "middle": [ |
| "K" |
| ], |
| "last": "Neuhold", |
| "suffix": "" |
| }, |
| { |
| "first": "E J", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of 3rd IFCIS International Conference on Cooperative Information Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "32--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huck G, Frankhause P, Aberer K and Neuhold E J. 1998. Jedi: extracting and synthesizing information from the web. In Proceedings of 3rd IFCIS International Conference on Cooperative Information Systems. New York, USA, 20-22 Aug 1998. PP32-41.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Correcting Sample Selection Bias by Unla-beled Data", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gretton", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Borgwardt", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Schol-Kopf B", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Advance in Neural Information Processing Systems", |
| "volume": "19", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huang J, Smola A, Gretton A, Borgwardt K and Schol-kopf B, Correcting Sample Selection Bias by Unla-beled Data, In Advance in Neural Information Processing Systems, Vol. 19, MiT press, Cambridge, MA, 2007", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Interactive wrapper genera-tion with minimal user effort", |
| "authors": [ |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Irmak", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Suel", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 15th World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "553--563", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Irmak U and Suel T. 2006. Interactive wrapper genera-tion with minimal user effort. In Proceedings of the 15th World Wide Web. Scotland, May 23-26, 2006. PP553-563.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Multi-task Transfer Learning for Weakly-Supervised Relation Extraction. The 47 th Association for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Jing", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1012--1020", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing J. 2009. Multi-task Transfer Learning for Weakly-Supervised Relation Extraction. The 47 th Association for Computational Linguistics, August 2009, pp1012-1020.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Kushmerick N. 1999, Regression testing for wrapper maintenance", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Keogh", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hart", |
| "suffix": "" |
| }, |
| { |
| "first": "Pazzani", |
| "middle": [ |
| "M" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the 14th National Conference on Artificial Intelligence (AAAI-1999)", |
| "volume": "", |
| "issue": "", |
| "pages": "74--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keogh E, Chu S, Hart D, and Pazzani M. 2003. Seg-menting time series: A survey and novel approach. In Data Mining in Time Series Databases. second ed. World Scientific, 2003. Kushmerick N. 1999, Regression testing for wrapper maintenance. In Proceedings of the 14th National Conference on Artificial Intelligence (AAAI-1999), 1999, PP74-79", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Wrapper induction: efficiency and expressiveness", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Kushmerick", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Artificial Intelligence", |
| "volume": "118", |
| "issue": "1-2", |
| "pages": "15--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kushmerick N. 2000. Wrapper induction: efficiency and expressiveness. Artificial Intelligence, 118(1-2), April 2000, PP15-68.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A Brief Survey of Web Data Extraction Tools", |
| "authors": [ |
| { |
| "first": "Ahf", |
| "middle": [], |
| "last": "Laender", |
| "suffix": "" |
| }, |
| { |
| "first": "Ribeiro", |
| "middle": [], |
| "last": "Neto", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [ |
| "A" |
| ], |
| "last": "Da Silva", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "D" |
| ], |
| "last": "Teixeira", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "SIGMOD Record", |
| "volume": "31", |
| "issue": "2", |
| "pages": "84--93", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laender AHF, Ribeiro Neto BA, da Silva AD, Teixeira JS, 2002. A Brief Survey of Web Data Extraction Tools. SIGMOD Record, 31(2), June 2002. pp84-93", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Wrapper Maintenance: A Machine Learning Approach", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lerman", |
| "suffix": "" |
| }, |
| { |
| "first": "S N", |
| "middle": [], |
| "last": "Minton", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "A" |
| ], |
| "last": "Knoblock", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Jour-nal of Artificial Intelligence Research", |
| "volume": "18", |
| "issue": "", |
| "pages": "149--181", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lerman K, Minton S N, Knoblock C A. 2003. Wrapper Maintenance: A Machine Learning Approach. Jour-nal of Artificial Intelligence Research 18(2003), PP149-181", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Using the Structure of Web Sites for Automatic Segmentation of Tables", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lerman", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Getoor", |
| "suffix": "" |
| }, |
| { |
| "first": "Minton", |
| "middle": [ |
| "S" |
| ], |
| "last": "Knoblock", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 2004 ACM SIGMOD international conference on Man-agement of data", |
| "volume": "", |
| "issue": "", |
| "pages": "119--130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lerman K, Getoor L, Minton S and Knoblock C. 2004. Using the Structure of Web Sites for Automatic Segmentation of Tables. In Proceedings of the 2004 ACM SIGMOD international conference on Man-agement of data. Paris, France, 2004. PP119-130", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Schema-Guided Wrap-per Maintenance for Web-Data Extraction", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [ |
| "F" |
| ], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "D D", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 5th ACM international workshop on Web information and data management", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meng XF, Hu D D, Li C. 2003. Schema-Guided Wrap-per Maintenance for Web-Data Extraction, Proceedings of the 5th ACM international workshop on Web information and data management, November 7-8, 2003, New Orleans, Louisiana, USA.,PP1-8", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A hierarchical approach to wrapper induction", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Muslea", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Minton", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Knoblock", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the third annual conference on Autonomous Agents", |
| "volume": "", |
| "issue": "", |
| "pages": "190--197", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muslea I, Minton S and Knoblock C. 1999. A hierarchical approach to wrapper induction. In Proceedings of the third annual conference on Autonomous Agents, Seattle, Washington, United States, 1999. PP190-197.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Table Extraction Using Conditional Random Fields", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Pinto", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [ |
| "X" |
| ], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruce", |
| "middle": [ |
| "W" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 26th annual international ACM SIGIR conference on Research and development in information retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "235--242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pinto D, McCallum A, Wei X and Bruce W. 2003. Table Extraction Using Conditional Random Fields. Proceedings of the 26th annual international ACM SIGIR conference on Research and development in information retrieval. Toronto, Canada, 2003. PP235-242.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Automatically maintaining wrappers for semistructured web sources", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Raposo", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "\u00c1lvarez", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Hidalgo", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 9th International Database Engineering & Application Symposium", |
| "volume": "", |
| "issue": "", |
| "pages": "105--114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raposo J, Pan A, \u00c1lvarez M , Hidalgo J. 2005. Automatically maintaining wrappers for semi- structured web sources. Proceedings of the 9th International Database Engineering & Application Symposium. 25-27 July 2005, Montreal, Canada. PP 105-114", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Web ecology: Re-cycling HTML pages as XML documents suing W4F", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Sahuguet", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Azavant", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of ACM SIGMOD Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sahuguet A and Azavant F. 1999. Web ecology: Re-cycling HTML pages as XML documents suing W4F. In Proceedings of ACM SIGMOD Workshop, Pennsylvania, June 3-4, 1999.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "ViPER: Augmenting automatic information extraction with visual perceptions", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [ |
| "K" |
| ], |
| "last": "Lausen", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 14th ACM international conference on Information and knowledge management", |
| "volume": "", |
| "issue": "", |
| "pages": "381--388", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon K and Lausen G. 2005. ViPER: Augmenting automatic information extraction with visual perceptions. In Proceedings of the 14th ACM international conference on Information and knowledge management. Bremen, Germany, 2005. pp 381-388.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Direct Importance Estimation for Covariance Shift Adaptation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Sugiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Kashima", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Von", |
| "middle": [], |
| "last": "Bunau", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Kawanabe", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Annals of the Institute of Statistical Mathematics", |
| "volume": "60", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sugiyama M, Suzuki T, nakajima S, Kashima H, Von Bunau P and Kawanabe M. Direct Importance Estimation for Covariance Shift Adaptation, Annals of the Institute of Statistical Mathematics, 60(4), 2008", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Data extraction and label assignment for Web databases", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "H" |
| ], |
| "last": "Lochovsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 12th World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "187--196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang JY, Lochovsky FH. 2003.Data extraction and label assignment for Web databases. In Proceedings of the 12th World Wide Web. Budapest,Hungary, 2003. PP187-196.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A machine learning based approach for table detection on the Web", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceed-ings of the 11th World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "242--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Y and Hu J. 2002. A machine learning based approach for table detection on the Web. In Proceed-ings of the 11th World Wide Web. Honolulu, Hawaii, USA, 2002. PP242-250.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "When Is the Generalized Likelihood Ration Test Optimal?", |
| "authors": [ |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Zeitouni", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziv", |
| "middle": [ |
| "J" |
| ], |
| "last": "Merhav", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "IEEE Transactions on Information Theory", |
| "volume": "38", |
| "issue": "5", |
| "pages": "1597--1602", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeitouni O, Ziv J and Merhav N. 1992. When Is the Generalized Likelihood Ration Test Optimal? IEEE Transactions on Information Theory, VOL 38, No. 5, PP1597-1602", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Web data extraction based on partial tree alignment", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [ |
| "H" |
| ], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 14th World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "76--85", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhai YH and Liu B. 2005. Web data extraction based on partial tree alignment. In Proceedings of the 14th World Wide Web. Chiba, Japan, May 10-14, 2005, PP76-85.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Fully Automatic Wrapper Generation For Search Engines", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "K" |
| ], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [ |
| "H" |
| ], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghavan", |
| "middle": [ |
| "V" |
| ], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [ |
| "C" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 14th World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "66--75", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhao HK, Meng WY, Wu ZH, Raghavan V and Yu C. 2005. Fully Automatic Wrapper Generation For Search Engines. In Proceedings of the 14th World Wide Web. Chiba, Japan, May 10-14, 2005, PP66-75", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Joint Optimization of Wrapper Generation and Template Detection", |
| "authors": [ |
| { |
| "first": "Sh Y", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "R H", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Wen", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "894--902", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zheng SH Y, Wu D, Song R H, Wen J R. 2007. Joint Optimization of Wrapper Generation and Template Detection. KDD 2007, August 12-15, 2007, San Jose, California USA, PP894-902", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Unsupervised Learning of Tree Alignment Models for Information Extraction", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Zigoris", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Eads", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Sixth IEEE International Conference on Data Mining -Workshops", |
| "volume": "", |
| "issue": "", |
| "pages": "45--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zigoris P, Eads D, Zhang Y. 2006. Unsupervised Learning of Tree Alignment Models for Information Extraction. Sixth IEEE International Conference on Data Mining -Workshops 2006, PP.45-49", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "text": "An example of weight setting", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "text": "An example of template change detection", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Impact of number of trees merged into the union tree", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Impact of the number of samples for getting the tag-matching weight Impact of the number of annotated samples for transfer learning", |
| "uris": null |
| }, |
| "FIGREF7": { |
| "num": null, |
| "type_str": "figure", |
| "text": "The experimental results on real and merged test set", |
| "uris": null |
| }, |
| "FIGREF8": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Experimental results on News websites", |
| "uris": null |
| }, |
| "FIGREF9": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Experimental results on Forums Experimental results on Blogs", |
| "uris": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "text": "The experimental results", |
| "content": "<table><tr><td/><td>PA</td><td>SVM</td><td>Our method</td></tr><tr><td>News</td><td colspan=\"2\">Precision 0.912 0.892 Recall 0.865 0.933</td><td>0.903 0.956</td></tr><tr><td>Forum</td><td colspan=\"2\">Precision 0.845 0.918 Recall 0.891 0.946</td><td>0.932 0.965</td></tr><tr><td>Blog</td><td colspan=\"2\">Precision 0.848 0.921 Recall 0.903 0.958</td><td>0.941 0.969</td></tr></table>", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "text": "Wrapper maintenance evaluation matrix", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |