| | import json |
| | from pathlib import Path |
| |
|
| | import numpy as np |
| | import pandas as pd |
| | import scipy.stats as sts |
| |
|
| | import ir_datasets as irds |
| | import ir_measures as irms |
| |
|
| | from tqdm import tqdm |
| |
|
# Five topical domains from the LOTTE benchmark.
domains = ['science', 'recreation', 'technology', 'lifestyle', 'writing']

# Per-domain probability of landing in each of the 5 sessions, under three
# drift patterns (D1/D2/D3), all derived from beta-binomial pmfs:
#   D1 — the pmf's mode shifts with the domain index,
#   D2 — one shared pmf, cyclically rotated per domain,
#   D3 — the pmf is delayed by i sessions and truncated to 5 entries.
_d1_cols = {}
_d2_cols = {}
_d3_cols = {}
for _i, _dom in enumerate(domains):
    _d1_cols[_dom] = [
        sts.distributions.betabinom(a=(_i + 1), b=(5 - _i), n=4).pmf(_j)
        for _j in range(5)
    ]
    _shared = [
        sts.distributions.betabinom(a=1, b=10, n=5).pmf(_j)
        for _j in range(5)
    ]
    _d2_cols[_dom] = (_shared * 2)[5 - _i:10 - _i]
    _delayed = [
        sts.distributions.betabinom(a=1, b=(5 - _i) * 2, n=4 - _i).pmf(_j)
        for _j in range(5)
    ]
    _d3_cols[_dom] = ([0] * _i + _delayed)[:5]

domain_distributions = pd.concat(
    {
        "D1": pd.DataFrame(_d1_cols),
        "D2": pd.DataFrame(_d2_cols),
        "D3": pd.DataFrame(_d3_cols),
    },
    names=['dist_type', 'session'],
    axis=0,
).rename_axis('domains', axis=1)

# Cumulative per-session mass, normalised by the final row so that every
# (domain, dist_type) column ends at exactly 1.0 — i.e. a sampling CDF.
_cumulative = domain_distributions.unstack('dist_type').cumsum(axis=0)
sampling_cdf = _cumulative / _cumulative.loc[4]
| |
|
| |
|
# Reproducible query-to-session assignment.
np.random.seed(123)

# One open JSONL stream per (drift type, session): queries/test_{dt}_{i}.jsonl.
# FIX: the qrels pass below globs './queries/*.jsonl', and the other artifact
# directories ('./qrels/', './docs/') are likewise unprefixed, but the streams
# were originally opened under './stream_distribution_dss/queries/' — which the
# reader never picks up. Write them to './queries/' so writer and reader agree.
fps = {
    dt: {i: open(f'./queries/test_{dt}_{i}.jsonl', 'w') for i in sampling_cdf.index}
    for dt in sampling_cdf.columns.get_level_values('dist_type').unique()
}
| |
|
# Assign every LOTTE test query to exactly one session per drift type and
# append it to the matching JSONL stream.
for dom in tqdm(domains, desc='queries'):
    cdfs = sampling_cdf[dom]

    for q in tqdm(irds.load(f'lotte/{dom}/test/forum').queries, desc=dom):
        record = {
            'query_id': dom + q.query_id,
            'text': q.text,
            'randval': np.random.random(),
        }
        # Inverse-CDF sampling: the chosen session is the first one whose
        # cumulative mass exceeds the random draw.
        for dt in cdfs.columns:
            exceeded = (record['randval'] < cdfs[dt]).tolist()
            fps[dt][exceeded.index(True)].write(json.dumps(record) + '\n')
| |
|
# Every query has been routed — flush and close all session streams.
for streams_by_session in fps.values():
    for stream in streams_by_session.values():
        stream.close()
| |
|
# Pool the qrels of every domain, prefixing both query and doc ids with the
# domain name so ids stay globally unique, then group them by query id.
qrels_pool = []
for d in domains:
    for q in irds.load(f'lotte/{d}/test/forum').qrels:
        qrels_pool.append(q._replace(query_id=d + q.query_id, doc_id=d + q.doc_id))

qrels_pool_grouped = {}
for q in qrels_pool:
    qrels_pool_grouped.setdefault(q.query_id, []).append(q)
| |
|
# Emit one TREC-format qrels file per query stream: every query id present in
# the stream gets all of its pooled judgements.
for fn in tqdm(Path("./queries/").glob("*.jsonl"), desc='qrels'):
    # FIX: read via a context manager — the original iterated `fn.open()`
    # directly and leaked the file handle. The unused `dt`/`i` unpacking of
    # fn.stem is also dropped; only the stem itself is needed.
    with fn.open() as fr:
        contains_query_ids = [json.loads(line)['query_id'] for line in fr]
    with open(f'./qrels/{fn.stem}.qrels', 'w') as fw:
        for query_id in contains_query_ids:
            # A KeyError here means a sampled query has no judgements —
            # kept as a loud failure, matching the original behaviour.
            for qr in qrels_pool_grouped[query_id]:
                fw.write(f"{query_id} {qr.iteration} {qr.doc_id} {qr.relevance}\n")
| |
|
# For each drift type, map doc_id -> the LAST session (0..4) in which that
# document is judged. Sessions are visited in increasing order, so a later
# session deliberately overwrites an earlier one (same semantics as the
# original dict comprehension).
doc_latest_appear_session = {}
for dt in sampling_cdf.columns.get_level_values('dist_type').unique():
    last_seen = {}
    for i in range(5):
        for qrel in irms.read_trec_qrels(f'./qrels/test_{dt}_{i}.qrels'):
            last_seen[qrel.doc_id] = i
    doc_latest_appear_session[dt] = last_seen
| |
|
| | def _marginalize_cdf(cdf): |
| | return cdf/cdf.iloc[-1] |
| | |
| |
|
# Re-seed so the document-sampling pass is reproducible independently of the
# query-sampling pass above.
np.random.seed(123)

# Accumulator: doc_lists[dist_type][session] -> list of sampled doc records.
doc_lists = {}
for dt in sampling_cdf.columns.get_level_values('dist_type').unique():
    doc_lists[dt] = {session: [] for session in sampling_cdf.index}
| |
|
# Assign every LOTTE document to one session per drift type, constraining a
# judged document so it cannot arrive (much) later than the session in which
# it is last judged.
for dom in tqdm(domains, desc='docs'):
    cdfs = sampling_cdf[dom]

    for doc in tqdm(irds.load(f'lotte/{dom}/test/forum').docs, desc=dom):
        # Rebind the loop variable to a plain record; one shared random draw
        # is used for all three drift types.
        doc = {
            'docid': dom+doc.doc_id,
            'text': doc.text,
            'randval': np.random.random(),
        }

        for dt in cdfs.columns:
            # Sentinel 99: an unjudged doc is unconstrained, so the .loc
            # slice below keeps all 5 sessions.
            appear_before = doc_latest_appear_session[dt].get(doc['docid'], 99)
            # Truncate the CDF to sessions <= appear_before+1 (.loc slicing
            # is label-based and inclusive), renormalise the remaining mass,
            # and take the first session whose CDF exceeds the draw.
            # NOTE(review): the +1 allows a judged doc to be placed one
            # session AFTER its last judged session, and since the lookup is
            # the *latest* (not first) appearance, a doc can also land after
            # an earlier session that judges it — confirm both are intended.
            session = (doc['randval'] < cdfs[dt].loc[:appear_before+1].pipe(_marginalize_cdf)).tolist().index(True)
            doc_lists[dt][session].append({**doc, 'before': appear_before})
| |
|
| |
|
# Persist each (drift type, session) document sample as a parquet file.
for dt, per_session in doc_lists.items():
    for session_id, sampled_docs in per_session.items():
        frame = pd.DataFrame(sampled_docs)
        frame.to_parquet(f'./docs/test_{dt}_{session_id}.parquet')