| { |
| "paper_id": "P11-1012", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:47:13.820926Z" |
| }, |
| "title": "Query Weighting for Ranking Model Adaptation", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "East China Normal University", |
| "location": { |
| "settlement": "Shanghai", |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": { |
| "settlement": "Shatin, Hong Kong", |
| "region": "N.T" |
| } |
| }, |
| "email": "wgao@se.cuhk.edu.hk" |
| }, |
| { |
| "first": "Aoying", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "East China Normal University", |
| "location": { |
| "settlement": "Shanghai", |
| "country": "China" |
| } |
| }, |
| "email": "ayzhou@sei.ecnu.edu.cn" |
| }, |
| { |
| "first": "Kam-Fai", |
| "middle": [], |
| "last": "Wong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": { |
| "settlement": "Shatin, Hong Kong", |
| "region": "N.T" |
| } |
| }, |
| "email": "kfwong@se.cuhk.edu.hk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose to directly measure the importance of queries in the source domain to the target domain where no rank labels of documents are available, which is referred to as query weighting. Query weighting is a key step in ranking model adaptation. As the learning object of ranking algorithms is divided by query instances, we argue that it's more reasonable to conduct importance weighting at query level than document level. We present two query weighting schemes. The first compresses the query into a query feature vector, which aggregates all document instances in the same query, and then conducts query weighting based on the query feature vector. This method can efficiently estimate query importance by compressing query data, but the potential risk is information loss resulted from the compression. The second measures the similarity between the source query and each target query, and then combines these fine-grained similarity values for its importance estimation. Adaptation experiments on LETOR3.0 data set demonstrate that query weighting significantly outperforms document instance weighting methods.", |
| "pdf_parse": { |
| "paper_id": "P11-1012", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose to directly measure the importance of queries in the source domain to the target domain where no rank labels of documents are available, which is referred to as query weighting. Query weighting is a key step in ranking model adaptation. As the learning object of ranking algorithms is divided by query instances, we argue that it's more reasonable to conduct importance weighting at query level than document level. We present two query weighting schemes. The first compresses the query into a query feature vector, which aggregates all document instances in the same query, and then conducts query weighting based on the query feature vector. This method can efficiently estimate query importance by compressing query data, but the potential risk is information loss resulted from the compression. The second measures the similarity between the source query and each target query, and then combines these fine-grained similarity values for its importance estimation. Adaptation experiments on LETOR3.0 data set demonstrate that query weighting significantly outperforms document instance weighting methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Learning to rank, which aims at ranking documents in terms of their relevance to user's query, has been widely studied in machine learning and information retrieval communities (Herbrich et al., 2000; Freund et al., 2004; Burges et al., 2005; Yue et al., 2007; Cao et al., 2007; Liu, 2009) . In general, large amount of training data need to be annotated by domain experts for achieving better ranking performance. In real applications, however, it is time consuming and expensive to annotate training data for each search domain. To alleviate the lack of training data in the target domain, many researchers have proposed to transfer ranking knowledge from the source domain with plenty of labeled data to the target domain where only a few or no labeled data is available, which is known as ranking model adaptation (Chen et al., 2008a; Chen et al., 2010; Chen et al., 2008b; Geng et al., 2009; Gao et al., 2009) .", |
| "cite_spans": [ |
| { |
| "start": 177, |
| "end": 200, |
| "text": "(Herbrich et al., 2000;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 201, |
| "end": 221, |
| "text": "Freund et al., 2004;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 222, |
| "end": 242, |
| "text": "Burges et al., 2005;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 243, |
| "end": 260, |
| "text": "Yue et al., 2007;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 261, |
| "end": 278, |
| "text": "Cao et al., 2007;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 279, |
| "end": 289, |
| "text": "Liu, 2009)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 818, |
| "end": 838, |
| "text": "(Chen et al., 2008a;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 839, |
| "end": 857, |
| "text": "Chen et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 858, |
| "end": 877, |
| "text": "Chen et al., 2008b;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 878, |
| "end": 896, |
| "text": "Geng et al., 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 897, |
| "end": 914, |
| "text": "Gao et al., 2009)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Intuitively, the more similar an source instance is to the target instances, it is expected to be more useful for cross-domain knowledge transfer. This motivated the popular domain adaptation solution based on instance weighting, which assigns larger weights to those transferable instances so that the model trained on the source domain can adapt more effectively to the target domain (Jiang and Zhai, 2007) . Existing instance weighting schemes mainly focus on the adaptation problem for classification (Zadrozny, 2004; Huang et al., 2007; Jiang and Zhai, 2007; Sugiyama et al., 2008) .", |
| "cite_spans": [ |
| { |
| "start": 386, |
| "end": 408, |
| "text": "(Jiang and Zhai, 2007)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 505, |
| "end": 521, |
| "text": "(Zadrozny, 2004;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 522, |
| "end": 541, |
| "text": "Huang et al., 2007;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 542, |
| "end": 563, |
| "text": "Jiang and Zhai, 2007;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 564, |
| "end": 586, |
| "text": "Sugiyama et al., 2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Although instance weighting scheme may be applied to documents for ranking model adaptation, the difference between classification and learning to rank should be highlighted to take careful consideration. Compared to classification, the learning object for ranking is essentially a query, which contains a list of document instances each with a relevance judgement. Recently, researchers proposed listwise ranking algorithms (Yue et al., 2007; Cao et al., 2007) to take the whole query as a learning object. The benchmark evaluation showed that list- Figure 1 : The information about which document instances belong to the same query is lost in document instance weighting scheme. To avoid losing this information, query weighting takes the query as a whole and directly measures its importance.", |
| "cite_spans": [ |
| { |
| "start": 425, |
| "end": 443, |
| "text": "(Yue et al., 2007;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 444, |
| "end": 461, |
| "text": "Cao et al., 2007)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 551, |
| "end": 559, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "wise approach significantly outperformed pointwise approach, which takes each document instance as independent learning object, as well as pairwise approach, which concentrates learning on the order of a pair of documents (Liu, 2009) . Inspired by the principle of listwise approach, we hypothesize that the importance weighting for ranking model adaptation could be done better at query level rather than document level. Figure 1 demonstrates the difference between instance weighting and query weighting, where there are two queries q s1 and q s2 in the source domain and q t1 and q t2 in the target domain, respectively, and each query has three retrieved documents. In Figure 1 (a), source and target domains are represented as a bag of document instances. It is worth noting that the information about which document instances belong to the same query is lost. To avoid this information loss, query weighting scheme shown as Figure 1 (b) directly measures importance weight at query level.", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 233, |
| "text": "(Liu, 2009)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 422, |
| "end": 430, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 673, |
| "end": 681, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 930, |
| "end": 938, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Instance weighting makes the importance estimation of document instances inaccurate when documents of the same source query are similar to the documents from different target queries. Take Figure 2 as a toy example, where the document instance is represented as a feature vector with four features. No matter what weighting schemes are used, it makes sense to assign high weights to source queries q s1 and q s2 because they are similar to target queries q t1 and q t2 , respectively. Meanwhile, the source query q s3 should be weighted lower because it's not quite similar to any of q t1 and q t2 at query level, meaning that the ranking knowledge from q s3 is different from that of q t1 and q t2 and thus less useful for the transfer to the target domain. Unfortunately, the three source queries q s1 , q s2 and q s3 would be weighted equally by document instance weighting scheme. The reason is that all of their documents are similar to the two document instances in target domain despite the fact that the documents of q s3 correspond to their counterparts from different target queries.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 184, |
| "end": 197, |
| "text": "Take Figure 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "<d 1 s1 >=( 5, 1, 0 ,0 ) <d 2 s1 >=( 6, 2, 0 ,0 ) <d 1 s2 >=( 0, 0, 5, 1) <d 2 s2 >=( 0, 0, 6, 2) <d 1 s3 >=( 5, 1, 0, 0) <d 2 s3 >=( 0, 0, 6, 2) <d 1 t1 >=(5, 1, 0 ,0 ) <d 2 t1 >=(6, 2, 0 ,0 ) <d 1 t2 >=( 0, 0, 5, 1) <d 2 t2 >=( 0, 0, 6, 2) q s1 q s2 q s3 q t1 q t2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Therefore, we should consider the source query as a whole and directly measure the query importance. However, it's not trivial to directly estimate a query's weight because a query is essentially provided as a matrix where each row represents a vector of document features. In this work, we present two simple but very effective approaches attempting to resolve the problem from distinct perspectives: (1) we compress each query into a query feature vector by aggregating all of its document instances, and then conduct query weighting on these query feature vectors; (2) we measure the similarity between the source query and each target query one by one, and then combine these fine-grained similarity values to calculate its importance to the target domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The basic idea of instance weighting is to put larger weights on source instances which are more similar to target domain. As a result, the key problem is how to accurately estimate the instance's weight indicating its importance to target domain. (Jiang and Zhai, 2007) used a small number of labeled data from target domain to weight source instances. Recently, some researchers proposed to weight source instance only using unlabeled target instances (Shimodaira, 2000; Sugiyama et al., 2008; Huang et al., 2007; Zadrozny, 2004; Gao et al., 2010) . In this work, we also focus on weighting source queries only using unlabeled target queries. (Gao et al., 2010; Ben-David et al., 2010) proposed to use a classification hyperplane to separate source instances from target instances. With the domain separator, the probability that a source instance is classified to target domain can be used as the importance weight. Other instance weighting methods were proposed for the sample selection bias or covariate shift in the more general setting of classifier learning (Shimodaira, 2000; Sugiyama et al., 2008; Huang et al., 2007; Zadrozny, 2004) . (Sugiyama et al., 2008 ) used a natural model selection procedure, referred to as Kullback-Leibler divergence Importance Estimation Procedure (KLIEP), for automatically tuning parameters, and showed that its importance estimation was more accurate. The main idea is to directly estimate the density function ratio of target distribution p t (x) to source distribution p s (x), i.e. w(x) = pt (x) ps (x) . Then model w(x) can be used to estimate the importance of source instances. Model parameters were computed with a linear model by minimizing the KL-divergence from p t (x) to its estimatorp t (x). Sincep t (x) =\u0175(x)p s (x), the ultimate objective only contains model\u0175(x).", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 270, |
| "text": "(Jiang and Zhai, 2007)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 454, |
| "end": 472, |
| "text": "(Shimodaira, 2000;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 473, |
| "end": 495, |
| "text": "Sugiyama et al., 2008;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 496, |
| "end": 515, |
| "text": "Huang et al., 2007;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 516, |
| "end": 531, |
| "text": "Zadrozny, 2004;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 532, |
| "end": 549, |
| "text": "Gao et al., 2010)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 645, |
| "end": 663, |
| "text": "(Gao et al., 2010;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 664, |
| "end": 687, |
| "text": "Ben-David et al., 2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1066, |
| "end": 1084, |
| "text": "(Shimodaira, 2000;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1085, |
| "end": 1107, |
| "text": "Sugiyama et al., 2008;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1108, |
| "end": 1127, |
| "text": "Huang et al., 2007;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1128, |
| "end": 1143, |
| "text": "Zadrozny, 2004)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 1146, |
| "end": 1168, |
| "text": "(Sugiyama et al., 2008", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1538, |
| "end": 1541, |
| "text": "(x)", |
| "ref_id": null |
| }, |
| { |
| "start": 1545, |
| "end": 1548, |
| "text": "(x)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Instance Weighting Scheme Review", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For using instance weighting in pairwise ranking algorithms, the weights of document instances should be transformed into those of document pairs (Gao et al., 2010) . Given a pair of documents \u27e8x i , x j \u27e9 and their weights w i and w j , the pairwise weight w ij could be estimated probabilistically as w i * w j . To consider query factor, query weight was further estimated as the average value of the weights over all the pairs, i.e., w", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 164, |
| "text": "(Gao et al., 2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Instance Weighting Scheme Review", |
| "sec_num": "2" |
| }, |
| { |
| "text": "q = 1 M \u2211 i,j w ij ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Instance Weighting Scheme Review", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where M is the number of pairs in query q. Additionally, to take the advantage of both query and document information, a probabilistic weighting for \u27e8x i , x j \u27e9 was modeled by w q * w ij . Through the transformation, instance weighting schemes for classification can be applied to ranking model adaptation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Instance Weighting Scheme Review", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section, we extend instance weighting to directly estimate query importance for more effective ranking model adaptation. We present two query weighting methods from different perspectives. Note that although our methods are based on domain separator scheme, other instance weighting schemes such as KLIEP (Sugiyama et al., 2008) can also be extended similarly.", |
| "cite_spans": [ |
| { |
| "start": 313, |
| "end": 336, |
| "text": "(Sugiyama et al., 2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our first query weighting method is inspired by the recent work on local learning for ranking (Geng et al., 2008; Banerjee et al., 2009) . The query can be compressed into a query feature vector, where each feature value is obtained by the aggregate of its corresponding features of all documents in the query. We concatenate two types of aggregates to construct the query feature vector:", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 113, |
| "text": "(Geng et al., 2008;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 114, |
| "end": 136, |
| "text": "Banerjee et al., 2009)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "the mean \u20d7 \u00b5 = 1 |q| \u2211 |q| i=1 \u20d7 f i and the variance \u20d7 \u03c3 = 1 |q| \u2211 |q| i=1 ( \u20d7 f i \u2212 \u20d7 \u00b5) 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": ", where \u20d7 f i is the feature vector of document i and |q| denotes the number of documents in q . Based on the aggregation of documents within each query, we can use a domain separator to directly weight the source queries with the set of queries from both domains.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Given query data sets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "D s = {q i s } m i=1 and D t = {q j t } n j=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "respectively from the source and target do-Algorithm 1 Query Weighting Based on Document Feature Aggregation in the Query Input:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Queries in the source domain, D s = {q i s } m i=1 ; Queries in the target domain, D t = {q j t } n j=1 ; Output:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Importance weights of queries in the source domain,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "IW s = {W i } m i=1 ; 1: y s = \u22121, y t = +1; 2: for i = 1; i \u2264 m; i + + do 3:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Calculate the mean vector \u20d7 \u00b5 i and variance vector \u20d7 \u03c3 i for q i s ;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "4:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Add query feature vector \u20d7 q i s = (\u20d7 \u00b5 i , \u20d7 \u03c3 i , y s ) to D \u2032 s ; 5: end for 6: for j = 1; j \u2264 n; j + + do 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Calculate the mean vector \u20d7 \u00b5 j and variance vector \u20d7 \u03c3 j for q j t ;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Add query feature vector \u20d7 q j t = (\u20d7 \u00b5 j , \u20d7 \u03c3 j , y t ) to D \u2032 t ; 9: end for 10: Find classification hyperplane H st which separates D \u2032 s from D \u2032 t ; 11: for i = 1; i \u2264 m; i + + do", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Document Feature Aggregation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Calculate the distance of \u20d7 q i s to H st , denoted as L(\u20d7 q i s );", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "12:", |
| "sec_num": null |
| }, |
| { |
| "text": "13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "12:", |
| "sec_num": null |
| }, |
| { |
| "text": "W i = P (q i s \u2208 D t ) = 1 1+exp(\u03b1 * L(\u20d7 q i s )+\u03b2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "12:", |
| "sec_num": null |
| }, |
| { |
| "text": "14:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "12:", |
| "sec_num": null |
| }, |
| { |
| "text": "Add W i to IW s ; 15: end for 16: return IW s ; mains, we use algorithm 1 to estimate the probability that the query q i s can be classified to D t , i.e. P (q i s \u2208 D t ), which can be used as the importance of q i s relative to the target domain. From step 1 to 9, D \u2032 s and D \u2032 t are constructed using query feature vectors from source and target domains. Then, a classification hyperplane H st is used to separate D \u2032 s from D \u2032 t in step 10. The distance of the query feature vector \u20d7 q i s from H st are transformed to the probability P (q i s \u2208 D t ) using a sigmoid function (Platt and Platt, 1999).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "12:", |
| "sec_num": null |
| }, |
| { |
| "text": "Although the query feature vector in algorithm 1 can approximate a query by aggregating its documents' features, it potentially fails to capture important feature information due to the averaging effect during the aggregation. For example, the merit of features in some influential documents may be canceled out in the mean-variance calculation, resulting in many distorted feature values in the query feature vector that hurts the accuracy of query classification hyperplane. This urges us to propose another query weighting method from a different perspective of query similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Intuitively, the importance of a source query to the target domain is determined by its overall similarity to every target query. Based on this intuition, we leverage domain separator to measure the similarity between a source query and each one of the target queries, where an individual domain separator is created for each pair of queries. We estimate the weight of a source query using algorithm 2. Note that we assume document instances in the same query are conditionally independent and all queries are independent of each other. In step 3, D \u2032 q i s is constructed by all the document instances {\u20d7 x k } in query q i s with the domain label y s . For each target query q j t , we use the classification hyperplane H ij to estimate P", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(\u20d7 x k \u2208 D \u2032 q j t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ", i.e. the probability that each document \u20d7 x k of q i s is classified into the document set of q j t (step 8). Then the similarity between q i s and q j t is measured by the probability P (q i s \u223c q j t ) at step 9. Finally, the probability of q i s belonging to the target domain P (q i s \u2208 D t ) is calculated at step 11. It can be expected that algorithm 2 will generate Algorithm 2 Query Weighting by Comparing Source and Target Queries Input:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Queries in source domain, D s = {q i s } m i=1 ; Queries in target domain, D t = {q j t } n j=1 ; Output:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Importance weights of queries in source domain,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "IW s = {W i } m i=1 ; 1: y s = \u22121, y t = +1; 2: for i = 1; i \u2264 m; i + + do 3: Set D \u2032 q i s ={\u20d7 x k , y s )} |q i s | k=1 ; 4:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "for j = 1; j \u2264 n; j + + do", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "5: Set D \u2032 q j t ={\u20d7 x k \u2032 , y t )} |q j t | k \u2032 =1 ; 6:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Find a classification hyperplane", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "H ij which separates D \u2032 q i s from D \u2032 q j t ; 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For each k, calculate the distance of", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u20d7 x k to H ij , denoted as L(\u20d7 x k ); 8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For each k, calculate", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (\u20d7 x k \u2208 D \u2032 q j t ) = 1 1+exp(\u03b1 * L(\u20d7 x k )+\u03b2) ; 9: Calculate P (q i s \u223c q j t ) = 1 |q i s | \u2211 |q i s | k=1 P (\u20d7 x k \u2208 D \u2032 q j t );", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "end for 11:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Add W i = P (q i s \u2208 D t ) = 1 n \u2211 n j=1 P (q i s \u223c q j t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "to IW s ; 12: end for 13: return IW s ; more precise measures of query similarity by utilizing the more fine-grained classification hyperplane for separating the queries of two domains.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query Weighting by Comparing Queries across Domains", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To adapt the source ranking model to the target domain, we need to incorporate query weights into existing ranking algorithms. Note that query weights can be integrated with either pairwise or listwise algorithms. For pairwise algorithms, a straightforward way is to assign the query weight to all the document pairs associated with this query. However, document instance weighting cannot be appropriately utilized in listwise approach. In order to compare query weighting with document instance weighting, we need to fairly apply them for the same approach of ranking. Therefore, we choose pairwise approach to incorporate query weighting. In this section, we extend Ranking SVM (RSVM) (Herbrich et al., 2000; Joachims, 2002) -one of the typical pairwise algorithms for this. Let's assume there are m queries in the data set of source domain, and for each query q i there are \u2113(q i ) number of meaningful document pairs that can be constructed based on the ground truth rank labels. Given ranking function f , the objective of RSVM is presented as follows:", |
| "cite_spans": [ |
| { |
| "start": 687, |
| "end": 710, |
| "text": "(Herbrich et al., 2000;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 711, |
| "end": 726, |
| "text": "Joachims, 2002)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "min 1 2 || \u20d7 w|| 2 + C m \u2211 i=1 \u2113(q i ) \u2211 j=1 \u03be ij (1) subject to z ij * f ( \u20d7 w, \u20d7 x j(1) q i \u2212 \u20d7 x j(2) q i ) \u2265 1 \u2212 \u03be ij \u03be ij \u2265 0, i = 1, . . . , m; j = 1, . . . , \u2113(q i ) where \u20d7 x j(1) q i and \u20d7 x j(2) q i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "are two documents with different rank label, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "z ij = +1 if \u20d7 x j(1) q i is labeled more relevant than \u20d7 x j(2) q i ; or z ij = \u22121 otherwise. Let \u03bb = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "2C and replace \u03be ij with Hinge Loss function (.) + , Equation 1 can be turned to the following form:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "min \u03bb|| \u20d7 w|| 2 + m \u2211 i=1 \u2113(q i ) \u2211 j=1 ( 1 \u2212 z ij * f ( \u20d7 w, \u20d7 x j(1) q i \u2212 \u20d7 x j(2) q i ) ) +", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(2) Let IW (q i ) represent the importance weight of source query q i . Equation 2 is extended for integrating the query weight into the loss function in a straightforward way:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "min \u03bb|| \u20d7 w|| 2 + m \u2211 i=1 IW (q i ) * \u2113(q i ) \u2211 j=1 ( 1 \u2212 z ij * f ( \u20d7 w, \u20d7 x j(1) q i \u2212 \u20d7 x j(2) q i ) ) +", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where IW (.) takes any one of the weighting schemes given by algorithm 1 and algorithm 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking Model Adaptation via Query Weighting", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We evaluated the proposed two query weighting methods on TREC-2003 and TREC-2004 web track datasets, which were released through LETOR3.0 as a benchmark collection for learning to rank by (Qin et al., 2010) . Originally, different query tasks were defined on different parts of data in the collection, which can be considered as different domains for us. Adaptation takes place when ranking tasks are performed by using the models trained on the domains in which they were originally defined to rank the documents in other domains. Our goal is to demonstrate that query weighting can be more effective than the state-of-the-art document instance weighting.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 206, |
| "text": "(Qin et al., 2010)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Three query tasks were defined in TREC-2003 and TREC-2004 web track, which are home page finding (HP), named page finding (NP) and topic distillation (TD) (Voorhees, 2003; Voorhees, 2004) . In this dataset, each document instance is represented by 64 features, including low-level features such as term frequency, inverse document frequency and document length, and high-level features such as BM25, language-modeling, PageRank and HITS. The number of queries of each task is given in Table 1 . The baseline ranking model is an RSVM directly trained on the source domain without using any weighting methods, denoted as no-weight. We implemented two weighting measures based on domain separator and Kullback-Leibler divergence, referred to DS and KL, respectively. In DS measure, three document instance weighting methods based on probability principle (Gao et al., 2010) were implemented for comparison, denoted as doc-pair, doc-avg and doc-comb (see Section 2). In KL measure, there is no probabilistic meaning for KL weight Query Task TREC 2003 TREC 2004 Topic Distillation 50 75 Home Page finding 150 75 Named Page finding 150 75 (Shalev-Shwartz et al., 2007) . The reported performance is obtained by five-fold cross validation.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 171, |
| "text": "(Voorhees, 2003;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 172, |
| "end": 187, |
| "text": "Voorhees, 2004)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 852, |
| "end": 870, |
| "text": "(Gao et al., 2010)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1141, |
| "end": 1170, |
| "text": "(Shalev-Shwartz et al., 2007)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 485, |
| "end": 492, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1032, |
| "end": 1133, |
| "text": "Task TREC 2003 TREC 2004 Topic Distillation 50 75 Home Page finding 150 75 Named Page finding", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets and Setup", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The task of HP and NP are more similar to each other whereas HP/NP is rather different from TD (Voorhees, 2003; Voorhees, 2004) . Thus, we carried out HP/NP to TD and TD to HP/NP ranking adaptation tasks. Mean Average Precision (MAP) (Baeza-Yates and Ribeiro-Neto, 1999) is used as the ranking performance measure.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 111, |
| "text": "(Voorhees, 2003;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 112, |
| "end": 127, |
| "text": "Voorhees, 2004)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The first set of experiments performed adaptation from HP to TD and NP to TD. The results of MAP are shown in Table 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 110, |
| "end": 117, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptation from HP/NP to TD", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "For the DS-based measure, as shown in the table, query-aggr works mostly better than no-weight, doc-pair, doc-avg and doc-comb, and query-comp performs the best among the five weighting methods. T-test on MAP indicates that the improvement of query-aggr over no-weight is statistically significant on two adaptation tasks while the improvement of document instance weighting over no-weight is statistically significant only on one task. All of the improvements of query-comp over no-weight, doc-pair, doc-avg and doc-comb are statistically significant. This demonstrates the effectiveness of query weighting compared to document instance weighting. Furthermore, query-comp can perform better than query-aggr. The reason is that although document feature aggregation might be a reasonable representation for a set of document instances, it is possible that some information could be lost or distorted in the process of compression. By contrast, more accurate query weights can be achieved by the more fine-grained similarity measure between the source query and all target queries in algorithm 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation from HP/NP to TD", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "For the KL-based measure, a similar observation can be obtained. However, it's obvious that DS-based models can work better than the KL-based. The reason is that KL conducts weighting by density function ratio which is sensitive to the data scale. Specifically, after document feature aggregation, the number of query feature vectors in all adaptation tasks is no more than 150 in source and target domains. It renders the density estimation in query-aggr very inaccurate since the set of samples is too small. As each query contains 1000 documents, they seem to provide query-comp with enough samples for achieving reasonable estimation of the density functions in both domains.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation from HP/NP to TD", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "To further validate the effectiveness of query weighting, we also conducted adaptation from TD to HP and TD to NP . MAP results with significant test are shown in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 163, |
| "end": 170, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Adaptation from TD to HP/NP", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "We can see that document instance weighting schemes including doc-pair, doc-avg and doc-comb cannot outperform no-weight based on the MAP measure. The reason is that each query in TD has 1000 retrieved documents in which 10-15 documents are relevant whereas each query in HP or NP only contains 1-2 relevant documents. Thus, when TD serves as the source domain, it leads to the problem that too many document pairs were generated for training the RSVM model. In this case, a small number of documents that were weighted inaccurately can make a significant impact on a large number of document pairs. Since the query weighting method directly estimates the query importance instead of document instance importance, both query-aggr and query-comp can avoid such kind of negative influence that is inevitable in the three document instance weighting methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Adaptation from TD to HP/NP", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "An interesting problem is which queries in the source domain are assigned high weights and why it's the case. Query weighting assigns each source query with a weight value. Note that it's not meaningful to directly compare absolute weight values between query-aggr and query-comp because source query weights from distinct weighting methods have different range and scale. However, it is feasible to compare the weights with the same weighting method. Intuitively, if the ranking model learned from a source query can work well in target domain, it should get high weight. from queries q 1 s and q 2 s respectively, and f q 1 s performs better than f q 2 s , then the source query weight of q 1 s should be higher than that of q 2 s . For further analysis, we compare the weight values between each source query pair, for which we trained RSVM on each source query and evaluated the learned model on test data from target domain. Then, the source queries are ranked according to the MAP values obtained by their corresponding ranking models. The order is denoted as R map . Meanwhile, the source queries are also ranked with respect to their weights estimated by DS-based measure, and the order is denoted as R weight . We hope R weight is correlated as positively as possible with R map . For comparison, we also ranked these queries according to randomly generated query weights, which is denoted as query-rand in addition to queryaggr and query-comp. The Kendall's \u03c4 = P \u2212Q P +Q is used to measure the correlation (Kendall, 1970) , where P is the number of concordant query pairs and Q is the number of discordant pairs. It's noted that \u03c4 's range is from -1 to 1, and the larger value means the two ranking is better correlated. The Kendall's \u03c4 by different weighting methods are given in Table 4 and 5.", |
| "cite_spans": [ |
| { |
| "start": 1517, |
| "end": 1532, |
| "text": "(Kendall, 1970)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1793, |
| "end": 1800, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Analysis on Source Query Weights", |
| "sec_num": "5.2.3" |
| }, |
| { |
| "text": "We find that the R weight produced by query-aggr and query-comp are both positively correlated with R map and clearly the orders generated by query-comp are more positively correlated than those by query-aggr. This is another explanation of why query-comp outperforms query-aggr. Furthermore, both are far better than query-rand because the R weight by query-rand is actually independent of R map .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Analysis on Source Query Weights", |
| "sec_num": "5.2.3" |
| }, |
| { |
| "text": "In the situation where there are large-scale data in source and target domains, how to efficiently weight a source query is another interesting problem. Without loss of generality, we reported the weighting time of doc-pair, query-aggr and query-comp for the adaptation from TD to HP using the DS measure. As doc-avg and doc-comb are derived from doc-pair, their efficiency is equivalent to doc-pair.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency", |
| "sec_num": "5.2.4" |
| }, |
| { |
| "text": "As shown in Table 6, query-aggr can efficiently weight queries using the query feature vector. The reason is two-fold: one is that the operation of query document aggregation can be done very fast, and the other is that there are 1000 documents in each query of TD or HP, which means that the compression ratio is 1000:1. Thus, the domain separator can be found quickly. In addition, query-comp is more efficient than doc-pair because doc-pair needs too much time to find the separator using all instances from source and target domain. query-comp uses a divide-and-conquer method to measure the similarity of a source query to each target query, and then efficiently combines these Weighting method HP03 to TD03 HP04 to TD04 NP03 to TD03 NP04 to TD04 query-aggr 0.0906 0.0280 0.0247 0.0525 query-comp 0.1001 0.0804 0.0711 0.1737 query-rand 0.0041 0.0008 -0.0127 0.0163 fine-grained similarity values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency", |
| "sec_num": "5.2.4" |
| }, |
| { |
| "text": "Cross-domain knowledge transfer has become an important topic in machine learning and natural language processing (Ben-David et al., 2010; Jiang and Zhai, 2007; Blitzer et al., 2006; Daum\u00e9 III and Marcu, 2006) . (Blitzer et al., 2006 ) proposed model adaptation using pivot features to build structural feature correspondence in two domains. (Pan et al., 2009) proposed to seek a common features space to reduce the distribution difference between the source and target domain. (Daum\u00e9 III and Marcu, 2006) assumed training instances were generated from source domain, target domain and crossdomain distributions, and estimated the parameter for the mixture distribution.", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 138, |
| "text": "(Ben-David et al., 2010;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 139, |
| "end": 160, |
| "text": "Jiang and Zhai, 2007;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 161, |
| "end": 182, |
| "text": "Blitzer et al., 2006;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 183, |
| "end": 209, |
| "text": "Daum\u00e9 III and Marcu, 2006)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 212, |
| "end": 233, |
| "text": "(Blitzer et al., 2006", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 342, |
| "end": 360, |
| "text": "(Pan et al., 2009)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 478, |
| "end": 505, |
| "text": "(Daum\u00e9 III and Marcu, 2006)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Recently, domain adaptation in learning to rank received more and more attentions due to the lack of training data in new search domains. Existing ranking adaptation approaches can be grouped into feature-based (Geng et al., 2009; Chen et al., 2008b; Wang et al., 2009; Gao et al., 2009) and instancebased (Chen et al., 2010; Chen et al., 2008a; Gao et al., 2010) approaches. In (Geng et al., 2009; Chen et al., 2008b) , the parameters of ranking model trained on the source domain was adjusted with the small set of labeled data in the target domain. (Wang et al., 2009) aimed at ranking adaptation in heterogeneous domains. (Gao et al., 2009) learned ranking models on the source and target domains independently, and then constructed a stronger model by interpolating the two models. (Chen et al., 2010; Chen et al., 2008a) weighted source instances by using small amount of labeled data in the target domain. (Gao et al., 2010) studied instance weighting based on domain separator for learning to rank by only using training data from source domain. In this work, we propose to directly measure the query importance instead of document instance importance by considering information at both levels.", |
| "cite_spans": [ |
| { |
| "start": 211, |
| "end": 230, |
| "text": "(Geng et al., 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 231, |
| "end": 250, |
| "text": "Chen et al., 2008b;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 251, |
| "end": 269, |
| "text": "Wang et al., 2009;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 270, |
| "end": 287, |
| "text": "Gao et al., 2009)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 306, |
| "end": 325, |
| "text": "(Chen et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 326, |
| "end": 345, |
| "text": "Chen et al., 2008a;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 346, |
| "end": 363, |
| "text": "Gao et al., 2010)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 379, |
| "end": 398, |
| "text": "(Geng et al., 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 399, |
| "end": 418, |
| "text": "Chen et al., 2008b)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 552, |
| "end": 571, |
| "text": "(Wang et al., 2009)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 626, |
| "end": 644, |
| "text": "(Gao et al., 2009)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 787, |
| "end": 806, |
| "text": "(Chen et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 807, |
| "end": 826, |
| "text": "Chen et al., 2008a)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 913, |
| "end": 931, |
| "text": "(Gao et al., 2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We introduced two simple yet effective query weighting methods for ranking model adaptation. The first represents a set of document instances within the same query as a query feature vector, and then directly measures the source query importance to the target domain. The second measures the similarity between a source query and each target query, and then combines the fine-grained similarity values to estimate its importance to the target domain. We evaluated our approaches on the LETOR3.0 dataset for ranking adaptation and found that: (1) the first method efficiently estimates query weights, and can outperform document instance weighting, but some information is lost during the aggregation; (2) the second method consistently and significantly outperforms document instance weighting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": " (No. 2009AA01Z150). We also thank anonymous reviewers for their helpful comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Modern Information Retrieval", |
| "authors": [ |
| { |
| "first": "Ricardo", |
| "middle": [ |
| "A" |
| ], |
| "last": "Baeza-Yates", |
| "suffix": "" |
| }, |
| { |
| "first": "Berthier", |
| "middle": [], |
| "last": "Ribeiro-Neto", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ricardo A. Baeza-Yates and Berthier Ribeiro-Neto. 1999. Modern Information Retrieval.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Efficient and accurate local learning for ranking", |
| "authors": [ |
| { |
| "first": "Somnath", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Avinava", |
| "middle": [], |
| "last": "Dubey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinesh", |
| "middle": [], |
| "last": "Machchhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Soumen", |
| "middle": [], |
| "last": "Chakrabarti", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "SIGIR workshop : Learning to rank for information retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Somnath Banerjee, Avinava Dubey, Jinesh Machchhar, and Soumen Chakrabarti. 2009. Efficient and accu- rate local learning for ranking. In SIGIR workshop : Learning to rank for information retrieval, pages 1-8.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A theory of learning from different domains", |
| "authors": [ |
| { |
| "first": "Shai", |
| "middle": [], |
| "last": "Ben-David", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Koby", |
| "middle": [], |
| "last": "Crammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Kulesza", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "Wortman" |
| ], |
| "last": "Vaughan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Machine Learning", |
| "volume": "79", |
| "issue": "", |
| "pages": "151--175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Wortman Vaughan. 2010. A theory of learning from different domains. Machine Learning, 79(1-2):151-175.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Domain adaptation with structural correspondence learning", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Blitzer, Ryan Mcdonald, and Fernando Pereira. 2006. Domain adaptation with structural correspon- dence learning. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning to rank using gradient descent", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Burges", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Shaked", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Renshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Lazier", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Deeds", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Hamilton", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hullender", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "89--96", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Burges, T. Shaked, E. Renshaw, A. Lazier, M. Deeds, N. Hamilton, and G. Hullender. 2005. Learning to rank using gradient descent. In Proceedings of ICML, pages 89-96.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Learning to rank: from pairwise approach to listwise approach", |
| "authors": [ |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Feng", |
| "middle": [], |
| "last": "Tsai", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "129--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhe Cao, Tao Qin, Tie-Yan Liu, Ming-Feng Tsai, and Hang Li. 2007. Learning to rank: from pairwise ap- proach to listwise approach. In Proceedings of ICML, pages 129 -136.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Transrank: A novel algorithm for transfer of rank learning", |
| "authors": [ |
| { |
| "first": "Depin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Gang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiguo", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ICDM Workshops", |
| "volume": "", |
| "issue": "", |
| "pages": "106--115", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Depin Chen, Jun Yan, Gang Wang, Yan Xiong, Weiguo Fan, and Zheng Chen. 2008a. Transrank: A novel algorithm for transfer of rank learning. In Proceedings of ICDM Workshops, pages 106-115.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Trada: Tree based ranking function adaptation", |
| "authors": [ |
| { |
| "first": "Keke", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Rongqing", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "K" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Gordon", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Larry", |
| "middle": [], |
| "last": "Heck", |
| "suffix": "" |
| }, |
| { |
| "first": "Belle", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keke Chen, Rongqing Lu, C.K. Wong, Gordon Sun, Larry Heck, and Belle Tseng. 2008b. Trada: Tree based ranking function adaptation. In Proceedings of CIKM.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Knowledge transfer for cross domain learning to rank", |
| "authors": [ |
| { |
| "first": "Depin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Gui-Rong", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Gang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Information Retrieval", |
| "volume": "13", |
| "issue": "3", |
| "pages": "236--253", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Depin Chen, Yan Xiong, Jun Yan, Gui-Rong Xue, Gang Wang, and Zheng Chen. 2010. Knowledge transfer for cross domain learning to rank. Information Re- trieval, 13(3):236-253.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Domain adaptation for statistical classifiers", |
| "authors": [ |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "26", |
| "issue": "1", |
| "pages": "101--126", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hal Daum\u00e9 III and Daniel Marcu. 2006. Domain adap- tation for statistical classifiers. Journal of Artificial Intelligence Research, 26(1):101-126.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "An efficient boosting algorithm for combining preferences", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Freund", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Iyer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Schapire", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "4", |
| "issue": "", |
| "pages": "933--969", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Y. Freund, R. Iyer, R. Schapire, and Y. Singer. 2004. An efficient boosting algorithm for combining prefer- ences. Journal of Machine Learning Research, 4:933- 969.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Model adaptation via model interpolation and boosting for web search ranking", |
| "authors": [ |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Burges", |
| "suffix": "" |
| }, |
| { |
| "first": "Krysta", |
| "middle": [], |
| "last": "Svore", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Nazan", |
| "middle": [], |
| "last": "Khan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shalin", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongyan", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianfeng Gao, Qiang Wu, Chris Burges, Krysta Svore, Yi Su, Nazan Khan, Shalin Shah, and Hongyan Zhou. 2009. Model adaptation via model interpolation and boosting for web search ranking. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Learning to rank only using training data from related domain", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Kam", |
| "middle": [ |
| "Fai" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Aoying", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "162--169", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Gao, Peng Cai, Kam Fai Wong, and Aoying Zhou. 2010. Learning to rank only using training data from related domain. In Proceedings of SIGIR, pages 162- 169.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Query dependent ranking using k-nearest neighbor", |
| "authors": [ |
| { |
| "first": "Xiubo", |
| "middle": [], |
| "last": "Geng", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Arnold", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Heung-Yeung", |
| "middle": [], |
| "last": "Shum", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "115--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiubo Geng, Tie-Yan Liu, Tao Qin, Andrew Arnold, Hang Li, and Heung-Yeung Shum. 2008. Query de- pendent ranking using k-nearest neighbor. In Proceed- ings of SIGIR, pages 115-122.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Ranking model adaptation for domain-specific search", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Geng", |
| "suffix": "" |
| }, |
| { |
| "first": "Linjun", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chao", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian-Sheng", |
| "middle": [], |
| "last": "Hua", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Geng, Linjun Yang, Chao Xu, and Xian-Sheng Hua. 2009. Ranking model adaptation for domain-specific search. In Proceedings of CIKM.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Large Margin Rank Boundaries for Ordinal Regression", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Herbrich", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Graepel", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Obermayer", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Herbrich, T. Graepel, and K. Obermayer. 2000. Large Margin Rank Boundaries for Ordinal Regres- sion. MIT Press, Cambridge.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Correcting sample selection bias by unlabeled data", |
| "authors": [ |
| { |
| "first": "Jiayuan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "J" |
| ], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Gretton", |
| "suffix": "" |
| }, |
| { |
| "first": "Karsten", |
| "middle": [ |
| "M" |
| ], |
| "last": "Borgwardt", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernhard", |
| "middle": [], |
| "last": "Sch\u00f6lkopf", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "601--608", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiayuan Huang, Alexander J. Smola, Arthur Gretton, Karsten M. Borgwardt, and Bernhard Sch\u00f6lkopf. 2007. Correcting sample selection bias by unlabeled data. In Proceedings of NIPS, pages 601-608.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Instance weighting for domain adaptation in nlp", |
| "authors": [ |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengxiang", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jing Jiang and ChengXiang Zhai. 2007. Instance weight- ing for domain adaptation in nlp. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Optimizing search engines using clickthrough data", |
| "authors": [ |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of SIGKDD", |
| "volume": "", |
| "issue": "", |
| "pages": "133--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thorsten Joachims. 2002. Optimizing search engines using clickthrough data. In Proceedings of SIGKDD, pages 133-142.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Learning to rank for information retrieval", |
| "authors": [ |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Foundations and Trends in Information Retrieval", |
| "volume": "3", |
| "issue": "3", |
| "pages": "225--331", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tie-Yan Liu. 2009. Learning to rank for information retrieval. Foundations and Trends in Information Re- trieval, 3(3):225-331.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Domain adaptation via transfer component analysis", |
| "authors": [ |
| { |
| "first": "Sinno Jialin", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivor", |
| "middle": [ |
| "W" |
| ], |
| "last": "Tsang", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "T" |
| ], |
| "last": "Kwok", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1187--1192", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sinno Jialin Pan, Ivor W. Tsang, James T. Kwok, and Qiang Yang. 2009. Domain adaptation via transfer component analysis. In Proceedings of IJCAI, pages 1187-1192.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "C" |
| ], |
| "last": "Platt", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Advances in Large Margin Classifiers", |
| "volume": "", |
| "issue": "", |
| "pages": "61--74", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John C. Platt. 1999. Probabilistic out- puts for support vector machines and comparisons to regularized likelihood methods. In Advances in Large Margin Classifiers, pages 61-74. MIT Press.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Letor: A benchmark collection for research on learning to rank for information retrieval", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Information Retrieval", |
| "volume": "13", |
| "issue": "4", |
| "pages": "346--374", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Qin, Tie-Yan Liu, Jun Xu, and Hang Li. 2010. Letor: A benchmark collection for research on learning to rank for information retrieval. Information Retrieval, 13(4):346-374.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Pegasos: Primal estimated sub-gradient solver for svm", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Shalev-Shwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Srebro", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 24th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "807--814", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Shalev-Shwartz, Y. Singer, and N. Srebro. 2007. Pe- gasos: Primal estimated sub-gradient solver for svm. In Proceedings of the 24th International Conference on Machine Learning, pages 807-814.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Improving predictive inference under covariate shift by weighting the loglikelihood function", |
| "authors": [ |
| { |
| "first": "Hidetoshi", |
| "middle": [], |
| "last": "Shimodaira", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Journal of Statistical Planning and Inference", |
| "volume": "90", |
| "issue": "", |
| "pages": "227--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hidetoshi Shimodaira. 2000. Improving predictive in- ference under covariate shift by weighting the log- likelihood function. Journal of Statistical Planning and Inference, 90:227-244.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Direct importance estimation with model selection and its application to covariate shift adaptation", |
| "authors": [ |
| { |
| "first": "Masashi", |
| "middle": [], |
| "last": "Sugiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Shinichi", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hisashi", |
| "middle": [], |
| "last": "Kashima", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "von B\u00fcnau", |
| "suffix": "" |
| }, |
| { |
| "first": "Motoaki", |
| "middle": [], |
| "last": "Kawanabe", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "1433--1440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masashi Sugiyama, Shinichi Nakajima, Hisashi Kashima, Paul von B\u00fcnau, and Motoaki Kawan- abe. 2008. Direct importance estimation with model selection and its application to covariate shift adaptation. In Proceedings of NIPS, pages 1433-1440.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Overview of trec 2003", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of TREC-2003", |
| "volume": "", |
| "issue": "", |
| "pages": "1--13", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen M. Voorhees. 2003. Overview of trec 2003. In Proceedings of TREC-2003, pages 1-13.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Overview of trec 2004", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of TREC-2004", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen M. Voorhees. 2004. Overview of trec 2004. In Proceedings of TREC-2004, pages 1-12.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Heterogeneous cross domain ranking in latent space", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Songcan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanzhu", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Wang, Jie Tang, Wei Fan, Songcan Chen, Zi Yang, and Yanzhu Liu. 2009. Heterogeneous cross domain ranking in latent space. In Proceedings of CIKM.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A support vector method for optimizing average precision", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Yue", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Finley", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Radlinski", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "271--278", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Y. Yue, T. Finley, F. Radlinski, and T. Joachims. 2007. A support vector method for optimizing average preci- sion. In Proceedings of SIGIR, pages 271-278.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Learning and evaluating classifiers under sample selection bias", |
| "authors": [ |
| { |
| "first": "Bianca", |
| "middle": [], |
| "last": "Zadrozny", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "325--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bianca Zadrozny. 2004. Learning and evalu- ating classifiers under sample selection bias. In Pro- ceedings of ICML, pages 325-332.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "A toy example showing the problem of document instance weighting scheme.", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF0": { |
| "html": null, |
| "num": null, |
| "text": "The number of queries in TREC-2003 and TREC-2004 web track and the doc-comb based on KL is not interpretable, and we only present the results of doc-pair and docavg for KL measure.", |
| "type_str": "table", |
| "content": "<table><tr><td>Our proposed query weight-</td></tr><tr><td>ing methods are denoted by query-aggr and query-</td></tr><tr><td>comp, corresponding to document feature aggrega-</td></tr><tr><td>tion in query and query comparison across domains,</td></tr><tr><td>respectively. All ranking models above were trained</td></tr><tr><td>only on source domain training data and the labeled</td></tr><tr><td>data of target domain was just used for testing.</td></tr><tr><td>For training the models efficiently, we imple-</td></tr><tr><td>mented RSVM with Stochastic Gradient Descent</td></tr><tr><td>(SGD) optimizer</td></tr></table>" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "text": "Results of MAP for HP/NP to TD adaptation. \u2020, \u2021, \u266f and boldface indicate significantly better than no-weight, doc-pair, doc-avg and doc-comb, respectively. Confidence level is set at 95%.", |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "html": null, |
| "num": null, |
| "text": "Results of MAP for TD to HP/NP adaptation. \u2020, \u2021, \u266f and boldface indicate significantly better than no-weight, doc-pair, doc-avg and doc-comb, respectively. Confidence level is set at 95%.", |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF6": { |
| "html": null, |
| "num": null, |
| "text": "The efficiency of weighting in seconds.", |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "html": null, |
| "num": null, |
| "text": "The Kendall's \u03c4 of R weight and R map in HP/NP to TD adaptation.", |
| "type_str": "table", |
| "content": "<table><tr><td colspan=\"5\">Weighting method TD03 to HP03 TD04 to HP04 TD03 to NP03 TD04 to NP04</td></tr><tr><td>query-aggr</td><td>0.1172</td><td>0.0121</td><td>0.0574</td><td>0.0464</td></tr><tr><td>query-comp</td><td>0.1304</td><td>0.1393</td><td>0.1586</td><td>0.0545</td></tr><tr><td>query-rand</td><td>\u22120.0291</td><td>0.0022</td><td>0.0161</td><td>-0.0262</td></tr></table>" |
| }, |
| "TABREF8": { |
| "html": null, |
| "num": null, |
| "text": "The Kendall's \u03c4 of R weight and R map in TD to HP/NP adaptation.", |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |