Michaelyya committed on
Commit
d7970b4
·
verified ·
1 Parent(s): 41b175c

Upload 10 files

Browse files
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ datasets/QACandidate_Pool.csv filter=lfs diff=lfs merge=lfs -text
Multi-label_Task/metrics.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
import os

# The six binary impact labels scored by this script; these names must match
# (after .capitalize() normalization) the column headers in both the gold
# and model-output CSVs.
impact_columns = [
    "Infrastructural impact",
    "Political impact",
    "Economic impact",
    "Ecological impact",
    "Agricultural impact",
    "Human health impact"
]
# Keys used to align model predictions with gold rows: one group per
# (Date, Time_Period) pair.
groupby=["Date","Time_Period"]
# NOTE(review): placeholder path — replace with the real gold-annotation CSV
# before running.
gold_data = pd.read_csv("the_path_to_gold_data.csv")
# Normalize header casing ("model_type" -> "Model_type", etc.) so the
# lookups below are robust to input casing.
gold_data.columns = [x.capitalize() for x in gold_data.columns]
15
+
16
def eval_row_wise_acc(data, output_file):
    """Compute exact-match accuracy per model: a grouped row counts as
    correct only when all six impact labels equal the gold labels.

    Predictions and gold rows are aggregated with max() over the
    (Date, Time_Period) groups before comparison. One result row per model
    is appended to ``output_file`` (header written only when the file does
    not exist yet).
    """
    data.columns = [x.capitalize() for x in data.columns]
    gold_by_group = gold_data.groupby(groupby)[impact_columns].max()

    rows = []
    for model_name in data['Model_type'].unique():
        preds = data[data['Model_type'] == model_name]
        pred_by_group = preds.groupby(groupby)[impact_columns].max()
        joined = pred_by_group.join(gold_by_group, how='inner',
                                    lsuffix='_model', rsuffix='_gold')

        model_cols = [f"{col}_model" for col in impact_columns]
        gold_cols = [f"{col}_gold" for col in impact_columns]
        # Row is "correct" only when every label matches the gold row.
        exact_match = (joined[model_cols].values == joined[gold_cols].values).all(axis=1)

        acc = exact_match.sum() / len(exact_match) if len(exact_match) > 0 else 0
        rows.append({
            "Model_Type": model_name,
            "Row-Wise-Accuracy": round(acc, 4)
        })

    report = pd.DataFrame(rows)
    # First run creates the file with a header; later runs append rows.
    write_header = not os.path.isfile(output_file)
    report.to_csv(output_file,
                  mode='w' if write_header else 'a',
                  header=write_header,
                  index=False)
42
+
43
def eval_metrics(data, output_file):
    """Compute per-impact Precision / Recall / F1 / Accuracy for each model.

    Predictions and gold labels are aggregated with max() over the
    (Date, Time_Period) groups, inner-joined, and scored per impact column.
    Emits one result row per (model, metric) pair with one value column per
    impact category; rows are appended to ``output_file`` (header written
    only when the file does not exist yet).

    Improvement over the previous version: the confusion-matrix counts
    (tp/tn/fp/fn) were recomputed once per metric name (4x redundant work);
    they are now computed once per column and reused — identical results.
    """
    data.columns = [x.capitalize() for x in data.columns]
    gold_grouped = gold_data.groupby(groupby)[impact_columns].max()
    results = []

    for model in data["Model_type"].unique():
        model_data = data[data["Model_type"] == model]
        grouped = model_data.groupby(groupby)[impact_columns].max()
        merged = grouped.join(gold_grouped, how="inner", lsuffix="_model", rsuffix="_gold")

        # Confusion-matrix counts per impact column, computed once.
        counts = {}
        for col in impact_columns:
            pred = merged[f"{col}_model"]
            gold = merged[f"{col}_gold"]
            counts[col] = (
                ((pred == 1) & (gold == 1)).sum(),  # tp
                ((pred == 0) & (gold == 0)).sum(),  # tn
                ((pred == 1) & (gold == 0)).sum(),  # fp
                ((pred == 0) & (gold == 1)).sum(),  # fn
            )

        # Same output shape/order as before: one row per metric name.
        for metric_name in ["Precision", "Recall", "F1", "Accuracy"]:
            metrics = {"Model_Type": model, "Metric": metric_name}
            for col in impact_columns:
                tp, tn, fp, fn = counts[col]
                precision = tp / (tp + fp) if (tp + fp) > 0 else 0
                recall = tp / (tp + fn) if (tp + fn) > 0 else 0
                if metric_name == "Precision":
                    value = precision
                elif metric_name == "Recall":
                    value = recall
                elif metric_name == "F1":
                    value = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
                else:  # Accuracy
                    total = tp + tn + fp + fn
                    value = (tp + tn) / total if total > 0 else 0
                metrics[col] = round(value, 4)
            results.append(metrics)

    df_result = pd.DataFrame(results)
    print(df_result)

    if not os.path.isfile(output_file):
        df_result.to_csv(output_file, index=False)
    else:
        df_result.to_csv(output_file, mode="a", header=False, index=False)
83
+
84
# Script entry point.
# NOTE(review): hard-coded Colab path — replace with a CLI argument or
# config value before running outside Colab.
data = pd.read_csv("/content/output_gpt.csv")
eval_metrics(data, "accuracy_results.csv")
Multi-label_Task/model_eval.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import List
import requests
import re
import random
import time
import json
from openai import OpenAI
import os
import dotenv
import csv
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

# Load OPENAI_API_KEY (and any other secrets) from a local .env file.
dotenv.load_dotenv()
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY")
)

# NOTE(review): blank Hugging Face token — fill in (or read from the
# environment) before `login` will succeed.
API_KEY=""
from huggingface_hub import login
login(token=API_KEY)
# Candidate open-weight models for this benchmark; set model_name to one of
# them (or your own) before running.
huggingface_models=["meta-llama/Meta-Llama-3-8B-Instruct","Qwen/Qwen2.5-7B-Instruct","mistralai/Mixtral-8x7B-Instruct-v0.1","Qwen/Qwen2.5-14B-Instruct","Qwen/Qwen2.5-7B-Instruct","google/gemma-2-9b-it","mistralai/Mistral-Small-24B-Instruct-2501"]
model_name = "model_name" #Change here for testing your model

tokenizer = AutoTokenizer.from_pretrained(model_name)
# 4-bit quantization so large models fit on a single GPU; fp32 CPU offload
# is enabled for layers that do not fit.
config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    llm_int8_enable_fp32_cpu_offload=True
)
# NOTE(review): YOUR_CACHE_DIR is an undefined placeholder name — this line
# raises NameError as written; replace it with a real path string.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    cache_dir=YOUR_CACHE_DIR,
    device_map={"": 0},
    quantization_config=config
)
# Trades compute for memory; harmless even though this script only runs
# inference.
model.gradient_checkpointing_enable()
39
+
40
def extract_answer(full_text):
    """Parse the model's 'Category: true/false' lines into a binary dict.

    Returns e.g. {"Infrastructural": 1, "Agricultural": 0, ...} for every
    category found in ``full_text``; categories the model omitted are
    simply absent (callers use .get with a default).

    Fix: matching is now case-insensitive — models frequently emit
    'True'/'False' or upper-cased category names, which the old pattern
    silently dropped even though the value comparison already used
    .lower(). Matched keys are normalized to title case so downstream
    lookups like extracted_response.get("Human Health") keep working.
    """
    pattern = (
        r"(Infrastructural|"
        r"Agricultural|"
        r"Ecological|"
        r"Financial|"
        r"Human Health|"
        r"Political):\s*(true|false)"
    )
    answers = re.findall(pattern, full_text, flags=re.IGNORECASE)
    # Last occurrence wins if a category is repeated (same as before).
    result = {key.title(): 1 if value.lower() == "true" else 0 for key, value in answers}
    return result
52
+
53
def inference(input_text, prompt=None, typ="hf"):
    """Classify one newspaper article into six binary impact categories.

    Args:
        input_text: article text substituted into the prompt template.
        prompt: optional custom template containing '{input_text}'.
        typ: "gpt" to call the OpenAI API; anything else runs the local
            HF model/tokenizer loaded at module level.

    Returns:
        (raw_model_output, parsed_binary_dict) — see extract_answer().

    Fixes versus the previous version:
      * The GPT branch sent the *unformatted* template (the literal
        '{input_text}' placeholder, not the article) — it now sends
        formatted_prompt.
      * On an API error, `result` was left unbound, raising NameError at
        the return statement; it now falls back to "".
      * The HF branch stripped len(prompt) (template length) characters
        from the decoded output instead of len(formatted_prompt),
        corrupting the echoed-prompt removal.
    """
    default_prompt = """
Given the following historical newspaper text:
"{input_text}"

Analyze the text and provide a binary classification (respond ONLY with 'true' or 'false') for each impact category based on explicit mentions in the text. Follow these specific guidelines:
1. ***Infrastructural Impact***: Classify as 'true' if the text mentions any damage or disruption to physical infrastructure and essential services. This includes structural damage to buildings, roads, or bridges; any disruptions to transportation systems such as railway cancellations or road closures; interruptions to public utilities including power and water supply; any failures in communication networks; or damage to industrial facilities. Consider only explicit mentions of physical damage or service disruptions in your classification.
2. ***Agricultural Impact***: Classify as 'true' if the text mentions any weather-related effects on farming and livestock management operations. This includes yield variations in crops and animal products; direct damage to crops, timber resources, or livestock; modifications to agricultural practices or schedules; disruptions to food production or supply chains; impacts on farming equipment and resources; or effects on agricultural inputs including soil conditions, water availability for farming, and essential materials such as seedlings, fertilizers, or animal feed.
3. ***Ecological Impact***: Classify as 'true' if the text mentions any effects on natural environments and ecosystems. This includes alterations to local environments and biodiversity; impacts on wildlife populations and behavior patterns; effects on non-agricultural plant life and vegetation; modifications to natural habitats including water bodies, forests, and wetlands; changes in hydrological systems such as river levels and lake conditions; or impacts on urban plant life.
4. ***Financial Impact***: Classify as 'true' if the text explicitly mentions economic consequences of weather events. This includes direct monetary losses; business disruptions or closures requiring financial intervention; market price fluctuations or demand changes for specific goods; impacts on tourism and local economic activities; or insurance claims or economic relief measures. Focus only on explicit mentions of financial losses or fluctuations.
5. ***Human Health Impact***: Classify as 'true' if the text mentions physical or mental health effects of weather events on populations. This includes direct injuries or fatalities (including cases where zero or more casualties are explicitly mentioned); elevated risks of weather-related or secondary illnesses; mental health consequences such as stress or anxiety; impacts on healthcare service accessibility; or long-term health implications.
6. ***Political Impact***: Classify as 'true' if the text mentions governmental and policy responses to weather events. This includes government decision-making and policy modifications in response to events; changes in public opinion or political discourse; effects on electoral processes or outcomes; international relations and aid responses; or debates surrounding disaster preparedness and response capabilities.

Note:
- Return 'false' for any impact category that is either not present in the text or not related to weather events
- Base classifications on explicit mentions in the text
- Focus on direct impacts rather than implications
- Consider immediate and direct effects

Answer only once in the following format:
Infrastructural: true/false
Agricultural: true/false
Ecological: true/false
Financial: true/false
Human Health: true/false
Political: true/false
"""
    prompt = prompt or default_prompt
    formatted_prompt = prompt.format(input_text=input_text)
    result = ""  # safe default so a failed API call still returns cleanly
    if typ == "gpt":
        try:
            response = client.chat.completions.create(
                model=model_name,
                messages=[
                    {"role": "system", "content": "You are an assistant specialized in analyzing historical weather event impacts from historical newspaper."},
                    # Bug fix: send the formatted prompt, not the raw template.
                    {"role": "user", "content": formatted_prompt}
                ],
                temperature=0,
                top_p=0,
                frequency_penalty=0,
                presence_penalty=0
            )
            print(response.choices[0].message.content)
            result = response.choices[0].message.content.strip()
        except Exception as e:
            print(f"Error: {e}")
    else:
        inputs = tokenizer(formatted_prompt, return_tensors="pt").to("cuda")
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=120)
        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Bug fix: the decoded text echoes the *formatted* prompt.
        result = result[len(formatted_prompt):].strip()

    return result, extract_answer(result)
107
+
108
def process_csv(input_csv, output_csv, prompt=None,typ="hf"):
    """Run `inference` over every article in input_csv and stream one row of
    binary impact labels per article to output_csv.

    Relies on the module-level `model_name` for the Model_Type column.
    The output file is truncated (mode='w') on every run.
    """
    count = 1
    with open(output_csv, mode='w', encoding='utf-8', newline='') as csv_file:
        # Output schema: row identifiers plus one 0/1 column per impact
        # category (empty string when the model omitted a category).
        fieldnames = [
            "ID", "Date", "Time_Period", "Model_Type", "Infrastructural impact",
            "Agricultural impact", "Ecological impact", "Financial impact",
            "Human Health impact", "Political impact"
        ]
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()

        with open(input_csv, mode='r', encoding='utf-8') as input_file:
            csv_reader = csv.DictReader(input_file)
            for row in csv_reader:
                original_text = row.get("Article", "")
                date = row.get("Date", "")
                type_row = row.get("Time_Period", "")
                id_row = row.get("ID", "")

                # Raw model text is discarded; only the parsed labels are
                # written out.
                original_response, extracted_response = inference(original_text,typ=typ)

                result = {
                    "ID": id_row,
                    "Date": date,
                    "Time_Period": type_row,
                    "Model_Type": model_name,
                    "Infrastructural impact": extracted_response.get("Infrastructural", ""),
                    "Agricultural impact": extracted_response.get("Agricultural", ""),
                    "Ecological impact": extracted_response.get("Ecological", ""),
                    "Financial impact": extracted_response.get("Financial", ""),
                    "Human Health impact": extracted_response.get("Human Health", ""),
                    "Political impact": extracted_response.get("Political", ""),
                }

                writer.writerow(result)

                # Simple progress indicator (one line per processed article).
                print(f'Finished {count}')
                count += 1
146
+
147
+
148
# Script entry point: classify every article in the input CSV.
input_csv = "your-input.csv"  # Change here for your input file name
out_csv = "your-output.csv"  # Change here for your output file name
process_csv(input_csv,out_csv)
print(f"Results written to {out_csv}")
QA-ranking_Task/GPT_eval.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
import random
import copy
from tqdm import tqdm
import time
import json
from openai import OpenAI
import os
import dotenv
import tempfile
import numpy as np
import pytrec_eval
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor

# Load OPENAI_API_KEY from a local .env file.
dotenv.load_dotenv()

@dataclass
class RankingResult:
    # One query's complete reranking record.
    query: str            # generated question for the gold passage
    correct_passage: str  # gold passage text
    ranking: str          # raw '[i] > [j] > ...' string from the model
    correct_idx: int      # 1-based index of the gold passage in the pool
    passages: List[str]   # candidate pool in presentation order
    ranks: List[int]      # parsed ranking as 0-based passage indices
27
+
28
class GPTReranker:
    """Ranks candidate passages against a query using an OpenAI chat model."""

    def __init__(self, api_key: str, model: str = "gpt-4o"):
        self.client = OpenAI(api_key=api_key)
        self.model = model

    def _create_messages(self, query: str, passages: List[str], start_idx: int) -> List[Dict[str, str]]:
        """Build the chat transcript: instructions, then one user/assistant
        exchange per passage, then the final ranking request."""
        first = start_idx + 1
        last = start_idx + len(passages)
        conversation: List[Dict[str, str]] = [
            {
                "role": "system",
                "content": "You are an expert that ranks passages based on their relevance to a given query. The most relevant passage should answer the query"
            },
            {
                "role": "user",
                "content": f"Query: {query}\n\nRank the following passages [{first} to {last}] by relevance."
            },
        ]

        # Feed passages one at a time, acknowledging each so the model
        # tracks passage numbers reliably.
        for number, text in enumerate(passages, start=first):
            conversation.append({"role": "user", "content": f"[{number}] {text}"})
            conversation.append({"role": "assistant", "content": f"Received passage [{number}]."})

        conversation.append({
            "role": "user",
            "content": "Provide ranking as numbers separated by '>', e.g., [3] > [1] > [2] > [5] > [4]. No explanation needed."
        })

        return conversation

    def get_ranking(self, query: str, passages: List[str], start_idx: int = 0, max_retries: int = 3) -> str:
        """Request a ranking string from the API.

        Retries with a 5-second back-off; the last failure is re-raised.
        """
        transcript = self._create_messages(query, passages, start_idx)
        for attempt in range(max_retries):
            try:
                reply = self.client.chat.completions.create(
                    model=self.model,
                    messages=transcript,
                    temperature=0,
                    max_tokens=150,
                    timeout=30
                )
                return reply.choices[0].message.content.strip()
            except Exception as e:
                print(f"Attempt {attempt + 1} failed: {str(e)}")
                if attempt == max_retries - 1:
                    raise
                time.sleep(5)
75
+
76
# NOTE(review): duplicate definition — RankingResult is already declared
# above with identical fields; this redefinition shadows it harmlessly but
# should be removed.
@dataclass
class RankingResult:
    # One query's complete reranking record (same fields as above).
    query: str            # generated question for the gold passage
    correct_passage: str  # gold passage text
    ranking: str          # raw '[i] > [j] > ...' string from the model
    correct_idx: int      # 1-based index of the gold passage in the pool
    passages: List[str]   # candidate pool in presentation order
    ranks: List[int]      # parsed ranking as 0-based passage indices
84
+
85
class Evaluator:
    """TREC-format evaluation helpers (NDCG@k via pytrec_eval)."""

    @staticmethod
    def clean_ranking_response(response: str) -> List[int]:
        """Extract every integer from the raw ranking string, in order of
        appearance (non-digits are treated as separators)."""
        return [int(num) for num in ''.join(c if c.isdigit() else ' ' for c in response).split()]

    @staticmethod
    def write_trec_files(results: List[RankingResult]) -> tuple[str, str]:
        """Write qrels and run files for pytrec_eval.

        Returns (qrels_path, run_path); both are NamedTemporaryFile paths
        the caller must delete.
        """
        run_file = tempfile.NamedTemporaryFile(delete=False).name
        qrels_file = tempfile.NamedTemporaryFile(delete=False).name

        with open(run_file, 'w') as f_run, open(qrels_file, 'w') as f_qrel:
            for i, result in enumerate(results):
                qid = str(i)
                # One relevant doc per query: the gold passage (1-based id).
                correct_docid = f"passage_{result.correct_idx}"
                f_qrel.write(f"{qid} 0 {correct_docid} 1\n")
                seen_ranks = set()
                adjusted_ranks = []

                for rank in result.ranks:
                    # If we've seen this rank before, increment until we find an unused rank
                    # NOTE(review): bumping can push an index past the
                    # passage count when the model repeats numbers — confirm
                    # this is intended.
                    while rank in seen_ranks:
                        rank += 1
                    seen_ranks.add(rank)
                    adjusted_ranks.append(rank)

                for rank_position, passage_num in enumerate(adjusted_ranks, 1):
                    docid = f"passage_{passage_num+1}"  # Convert to 1-based passage numbering
                    # Monotone-decreasing score so rank order is preserved.
                    score = 1.0/rank_position
                    f_run.write(f"{qid} Q0 {docid} {rank_position} {score:.4f} run\n")

        return qrels_file, run_file

    @staticmethod
    def calculate_metrics(qrels_file: str, run_file: str) -> Dict[str, float]:
        """Macro-averaged NDCG@{1,5,10} across all queries in the run."""
        with open(qrels_file) as f_qrel, open(run_file) as f_run:
            qrel = pytrec_eval.parse_qrel(f_qrel)
            run = pytrec_eval.parse_run(f_run)

        evaluator = pytrec_eval.RelevanceEvaluator(qrel, {'ndcg_cut.1', 'ndcg_cut.5', 'ndcg_cut.10'})
        scores = evaluator.evaluate(run)

        metrics = {'NDCG@1': 0.0, 'NDCG@5': 0.0, 'NDCG@10': 0.0}
        for query_scores in scores.values():
            metrics['NDCG@1'] += query_scores['ndcg_cut_1']
            metrics['NDCG@5'] += query_scores['ndcg_cut_5']
            metrics['NDCG@10'] += query_scores['ndcg_cut_10']

        # Average over the number of evaluated queries.
        return {k: round(v / len(scores), 4) for k, v in metrics.items()}
133
+
134
def process_query(row: pd.Series, reranker: GPTReranker) -> Optional[RankingResult]:
    """Rank the 100 candidate passages for one dataset row.

    Returns a RankingResult on success, or None if any step fails (the
    error is printed and the row is skipped by the caller).
    """
    try:
        question = row['query']
        gold_idx = int(row['correct_passage_index'])
        pool = [row[f'passage_{i}'] for i in range(1, 101)]

        raw_ranking = reranker.get_ranking(question, pool)
        # Model numbers are 1-based; store 0-based indices.
        zero_based = [r - 1 for r in Evaluator.clean_ranking_response(raw_ranking)]

        return RankingResult(
            query=question,
            correct_passage=pool[gold_idx - 1],
            ranking=raw_ranking,
            correct_idx=gold_idx,
            passages=pool,
            ranks=zero_based
        )
    except Exception as e:
        print(f"Error processing query: {str(e)}")
        return None
154
+
155
def main():
    """End-to-end GPT reranking evaluation over the candidate-pool CSV:
    rank every query's 100 passages, score with TREC NDCG metrics, and
    save per-query results to CSV."""
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OpenAI API key not found")

    df = pd.read_csv('./ranking/candidate_pool_query_passage.csv')
    reranker = GPTReranker(api_key)

    results = []
    for _, row in tqdm(df.iterrows()):
        # Walrus: keep only rows whose ranking succeeded (None is skipped).
        if result := process_query(row, reranker):
            print(f"\nQuery: {result.query}")
            print(f"Correct index: {result.correct_idx}")
            print(f"Ranks: {result.ranks[:10]}")  # Show first 10 ranks
            results.append(result)
            # Crude rate limiting between API calls.
            time.sleep(1)

    qrels_file, run_file = Evaluator.write_trec_files(results)

    # Debug dumps of the intermediate TREC files.
    print("\nQRELS file contents:")
    with open(qrels_file, 'r') as f:
        print(f.read())

    print("\nRun file contents:")
    with open(run_file, 'r') as f:
        print(f.read())

    metrics = Evaluator.calculate_metrics(qrels_file, run_file)

    print("\nEvaluation Results:")
    for metric, score in metrics.items():
        print(f"{metric}: {score:.4f}")

    # Remove the temp TREC files, then persist full per-query records.
    os.unlink(qrels_file)
    os.unlink(run_file)
    results_df = pd.DataFrame([vars(r) for r in results])
    results_df.to_csv('reranking_100_passages_GPT_4o.csv', index=False)

# NOTE(review): unconditional call — this runs on import; consider wrapping
# in `if __name__ == "__main__":`.
main()
QA-ranking_Task/Generate_Pool.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
import random

# Fixed seed so the distractor pools are reproducible across runs.
RANDOM_SEED = 1510
random.seed(RANDOM_SEED)

input_file = "passages_and_queries.csv"
output_file = "query_and_100_passages.csv"

data = pd.read_csv(input_file)
output_data = []

# For each (passage, query) pair, build a 100-passage pool: the gold
# passage plus 99 distractors sampled from the other rows.
for idx, row in data.iterrows():
    correct_passage = row['Text']
    query = row['Generated_Query']

    # Fresh list each iteration; remove() drops the first occurrence only.
    # NOTE(review): if two rows share identical Text, the distractor sample
    # can still contain a copy of the gold passage and passages.index()
    # below may report the wrong position — confirm Text values are unique.
    all_passages = data['Text'].tolist()
    all_passages.remove(correct_passage)
    distractors = random.sample(all_passages, 99)

    passages = distractors + [correct_passage]
    random.shuffle(passages)

    # 1-based position of the gold passage within the shuffled pool.
    correct_index = passages.index(correct_passage) + 1

    new_row = {
        "id": idx + 1,
        "query": query,
        "correct_passage_index": correct_index
    }
    # One column per pool slot: passage_1 .. passage_100.
    for i, passage in enumerate(passages):
        new_row[f"passage_{i+1}"] = passage

    output_data.append(new_row)


output_df = pd.DataFrame(output_data)
output_df.to_csv(output_file, index=False)
QA-ranking_Task/Generate_Query.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
from openai import OpenAI
import os
import dotenv
import time
from tqdm import tqdm

# Pull OPENAI_API_KEY from a local .env file.
dotenv.load_dotenv()

client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY")
)
13
def create_prompt(row):
    """Build the question-generation prompt for one passage row.

    Collects the impact categories whose columns are positive (falling back
    to 'general' when none are) and interpolates them, the weather type,
    and the passage text into the instruction template.
    """
    impact_labels = [
        ('Infrastructural Impact', 'infrastructure'),
        ('Political Impact', 'political'),
        ('Economic Impact', 'economic'),
        ('Ecological Impact', 'ecological'),
        ('Agricultural Impact', 'agricultural'),
        ('Human Health Impact', 'human health'),
    ]
    impacts = [label for column, label in impact_labels if row[column] > 0]
    impact_str = ', '.join(impacts) if impacts else 'general'

    return f"""Given the following passage about {row['Weather']}, generate a specific question that:
1. Can be answered using ONLY the information in this passage
2. Focuses on the {impact_str} impacts mentioned
3. Is detailed and specific to this exact situation
4. Requires understanding the passage's unique context
5. Cannot be answered by other similar passages about {row['Weather']}

Passage:
{row['Text']}

Generate a single, focused question that meets these criteria."""
43
+
44
def generate_query(prompt, max_retries=3):
    """Generate a query via the OpenAI API with retry logic.

    Returns the model's question text, or the sentinel string
    "Error generating query" after ``max_retries`` consecutive failures.
    """
    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that generates specific, focused questions about weather-related passages. Your questions should be answerable using only the information in the given passage."},
                    {"role": "user", "content": prompt}
                ],
                # Some sampling variety is desirable for question diversity.
                temperature=0.7,
                max_tokens=400
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            if attempt == max_retries - 1:
                print(f"Error after {max_retries} attempts: {e}")
                return "Error generating query"
            # Back off before retrying.
            time.sleep(5)
63
+
64
+
65
# Batch-generate one query per passage row that is not flagged for removal.
df = pd.read_csv('datasets/context_data/reranking_passage.csv')
df['Generated_Query'] = ''
for idx in tqdm(df.index):
    # Remove == 0 marks rows kept in the dataset; flagged rows get no query.
    if df.loc[idx, 'Remove'] == 0:
        prompt = create_prompt(df.loc[idx])
        query = generate_query(prompt)
        df.loc[idx, 'Generated_Query'] = query
        # Crude rate limiting between API calls.
        time.sleep(1)
output_file = 'reranking_passage_with_queries.csv'
df.to_csv(output_file, index=False)
print(f"Results saved to {output_file}")
76
+
QA-ranking_Task/metrics.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
import random
import copy
from tqdm import tqdm
import time
import json
from openai import OpenAI
import os
import dotenv
import tempfile
import numpy as np
import pytrec_eval
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor

# Load environment variables (API keys) from a local .env file.
dotenv.load_dotenv()

@dataclass
class RankingResult:
    # One query's complete reranking record, as serialized by the ranking
    # scripts and reloaded here from JSON.
    query: str            # generated question for the gold passage
    correct_passage: str  # gold passage text
    ranking: str          # raw '[i] > [j] > ...' string from the model
    correct_idx: int      # 1-based index of the gold passage in the pool
    passages: List[str]   # candidate pool in presentation order
    ranks: List[int]      # parsed ranking as 0-based passage indices
27
+
28
class Evaluator:
    """TREC-format evaluation: NDCG@k, MRR and Recall@5 via pytrec_eval."""

    @staticmethod
    def clean_ranking_response(response: str) -> List[int]:
        """Extract every integer from the raw ranking string, in order of
        appearance (non-digits act as separators)."""
        return [int(num) for num in ''.join(c if c.isdigit() else ' ' for c in response).split()]

    @staticmethod
    def write_trec_files(results: List[RankingResult]) -> tuple[str, str]:
        """Write qrels and run files for pytrec_eval.

        Returns (qrels_path, run_path); both are NamedTemporaryFile paths
        the caller must delete.
        """
        run_file = tempfile.NamedTemporaryFile(delete=False).name
        qrels_file = tempfile.NamedTemporaryFile(delete=False).name

        with open(run_file, 'w') as f_run, open(qrels_file, 'w') as f_qrel:
            for i, result in enumerate(results):
                qid = str(i)
                # One relevant doc per query: the gold passage (1-based id).
                correct_docid = f"passage_{result.correct_idx}"
                f_qrel.write(f"{qid} 0 {correct_docid} 1\n")
                seen_ranks = set()
                adjusted_ranks = []

                for rank in result.ranks:
                    # De-duplicate repeated rank numbers by bumping to the
                    # next unused value.
                    # NOTE(review): bumping can exceed the passage count when
                    # the model repeats numbers — confirm this is intended.
                    while rank in seen_ranks:
                        rank += 1
                    seen_ranks.add(rank)
                    adjusted_ranks.append(rank)

                for rank_position, passage_num in enumerate(adjusted_ranks, 1):
                    docid = f"passage_{passage_num+1}"  # Convert to 1-based passage numbering
                    # Monotone-decreasing score so rank order is preserved.
                    score = 1.0/rank_position
                    f_run.write(f"{qid} Q0 {docid} {rank_position} {score:.4f} run\n")

        return qrels_file, run_file

    @staticmethod
    def calculate_metrics(qrels_file: str, run_file: str) -> Dict[str, float]:
        """Macro-averaged NDCG@{1,5,10}, MRR and Recall@5 across queries."""
        with open(qrels_file) as f_qrel, open(run_file) as f_run:
            qrel = pytrec_eval.parse_qrel(f_qrel)
            run = pytrec_eval.parse_run(f_run)

        evaluator = pytrec_eval.RelevanceEvaluator(
            qrel,
            {'ndcg_cut.1', 'ndcg_cut.5', 'ndcg_cut.10', 'recip_rank', 'recall.5'}
        )
        scores = evaluator.evaluate(run)

        metrics = {
            'NDCG@1': 0.0,
            'NDCG@5': 0.0,
            'NDCG@10': 0.0,
            'MRR': 0.0,
            'Recall@5': 0.0
        }

        for query_scores in scores.values():
            metrics['NDCG@1'] += query_scores['ndcg_cut_1']
            metrics['NDCG@5'] += query_scores['ndcg_cut_5']
            metrics['NDCG@10'] += query_scores['ndcg_cut_10']
            metrics['MRR'] += query_scores['recip_rank']
            metrics['Recall@5'] += query_scores['recall_5']

        # Average over the number of evaluated queries.
        num_queries = len(scores)
        return {k: round(v / num_queries, 4) for k, v in metrics.items()}
88
+
89
+
90
def load_results(filename: str) -> List[RankingResult]:
    """Deserialize RankingResult records from a JSON array file.

    The file must contain a list of objects with exactly the RankingResult
    field names (as written by the ranking scripts).
    """
    with open(filename, 'r', encoding='utf-8') as f:
        records = json.load(f)

    return [
        RankingResult(
            query=rec['query'],
            correct_passage=rec['correct_passage'],
            ranking=rec['ranking'],
            correct_idx=rec['correct_idx'],
            passages=rec['passages'],
            ranks=rec['ranks']
        )
        for rec in records
    ]
107
+
108
def main():
    """Score a saved ranking run (JSON) with TREC metrics and export the
    per-query records to CSV."""
    loaded_results = load_results('./your-output.json') # Change here when you have your output from QA ranking json
    qrels_file, run_file = Evaluator.write_trec_files(loaded_results)

    # Debug dumps of the intermediate TREC files.
    print("\nQRELS file contents:")
    with open(qrels_file, 'r') as f:
        print(f.read())

    print("\nRun file contents:")
    with open(run_file, 'r') as f:
        print(f.read())

    metrics = Evaluator.calculate_metrics(qrels_file, run_file)

    print("\nEvaluation Results:")
    for metric, score in metrics.items():
        print(f"{metric}: {score:.4f}")

    # Remove the temp TREC files, then persist per-query details.
    os.unlink(qrels_file)
    os.unlink(run_file)
    results_df = pd.DataFrame([vars(r) for r in loaded_results])
    results_df.to_csv('ranking-results.csv', index=False) # Change here to save your final results

# NOTE(review): unconditional call — this runs on import; consider wrapping
# in `if __name__ == "__main__":`.
main()
QA-ranking_Task/model_eval.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
import random
import copy
from tqdm import tqdm
import time
import json
from openai import OpenAI
import os
import dotenv
import tempfile
import numpy as np

from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load API keys from a local .env file.
dotenv.load_dotenv()

# NOTE(review): blank Hugging Face token — fill in (or read from the
# environment) before `login` will succeed.
API_KEY = ""
login(token=API_KEY)
# Candidate open-weight models for this benchmark; set model_name to one of
# them (or your own) before running.
huggingface_models=["meta-llama/Llama-3.1-8B-Instruct","Qwen/Qwen2.5-7B-Instruct","Qwen/Qwen2.5-14B-Instruct","Qwen/Qwen2.5-7B-Instruct"]
model_name = "Model_name" # Change here for testing your model

# NOTE(review): duplicate imports — torch/transformers are re-imported
# here; harmless but could be consolidated with the block above.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
27
+
28
class GPTReranker:
    """Windowed passage reranker driven by a local Hugging Face causal LM.

    Ranks up to 100 passages in three fixed windows (0-39, 40-79, 80-end),
    pools each window's top 10, then re-ranks that pool for the final order.

    NOTE(review): despite the name, this class uses a local HF model, not
    the OpenAI API; `api_key` is accepted but never used, and
    window_size/overlap are validated but the grouping below is hard-coded
    to 40/40/remainder — confirm intended.
    """
    def __init__(self, api_key: str, model_name: str = model_name, window_size: int = 30, overlap: int = 10):
        # Guard against degenerate sliding-window settings.
        if window_size <= overlap:
            raise ValueError("Window size must be greater than overlap")
        if overlap < 0:
            raise ValueError("Overlap must be non-negative")

        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto"
        )
        self.model.gradient_checkpointing_enable()
        self.model.eval() # Set to evaluation mode

        self.window_size = window_size
        self.overlap = overlap

    def _create_messages(self, query: str, passages: List[str], start_idx: int) -> str:
        """Build a single [INST]-style prompt listing passages numbered
        start_idx+1 .. start_idx+len(passages)."""
        prompt = f"""<s>[INST] You are an expert that ranks passages based on their relevance to a given query.
The most relevant passage should be ranked first.
Important: Do not just sort the passage numbers. Evaluate each passage's content for relevance.

Query: {query}

I will give you passages numbered from {start_idx+1} to {start_idx+len(passages)}. Rank them by relevance to the query, with the most relevant first.

"""
        for i, passage in enumerate(passages):
            prompt += f"[{start_idx+i+1}] {passage}\n"

        prompt += """
Based on the content of each passage (not just their numbers), rank them from most to least relevant.
Format: [most_relevant] > [next] > [next]. No explanation needed.[/INST]"""

        return prompt

    def get_ranking_for_group(self, query: str, passages: List[str], start_idx: int = 0, max_retries: int = 3) -> List[int]:
        """Rank one window of passages with the local model.

        Returns 0-based *global* passage indices (start_idx offset applied),
        silently dropping any out-of-range numbers the model hallucinates.
        Retries up to max_retries times, re-raising the final failure.
        """
        prompt = self._create_messages(query, passages, start_idx)

        for attempt in range(max_retries):
            try:
                inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

                with torch.no_grad():
                    outputs = self.model.generate(
                        **inputs,
                        max_new_tokens=150,
                        temperature=0.0,
                        do_sample=False,
                        pad_token_id=self.tokenizer.eos_token_id
                    )
                # The decoded text echoes the prompt; slice it off so only
                # the generated ranking remains.
                ranking_str = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
                ranking_str = ranking_str[len(prompt):]

                raw_ranks = Evaluator.clean_ranking_response(ranking_str)
                global_ranks = []
                for rank in raw_ranks:
                    # Model numbers are 1-based within the window; convert to
                    # a 0-based global index and range-check it.
                    local_idx = rank - (start_idx + 1)
                    if 0 <= local_idx < len(passages):
                        global_idx = start_idx + local_idx
                        global_ranks.append(global_idx)

                return global_ranks

            except Exception as e:
                print(f"Attempt {attempt + 1} failed: {str(e)}")
                if attempt == max_retries - 1:
                    raise
                time.sleep(5)

    def get_ranking(self, query: str, passages: List[str]) -> str:
        """Two-stage ranking over the full passage pool.

        Stage 1: rank three fixed windows separately and keep each window's
        top 10. Stage 2: re-rank the pooled (up to 30) winners. Passages
        the final pass dropped, then all non-pooled passages, are appended
        in original order. Returns a '[i] > [j] > ...' string of 1-based
        passage numbers.
        """
        if not passages:
            raise ValueError("No passages provided")

        first_group = passages[:40]
        second_group = passages[40:80]
        third_group = passages[80:]

        first_ranks = self.get_ranking_for_group(query, first_group, 0)
        print(f"First group top 10: {first_ranks[:10]}")
        second_ranks = self.get_ranking_for_group(query, second_group, 40)
        print(f"Second group top 10: {second_ranks[:10]}")
        third_ranks = self.get_ranking_for_group(query, third_group, 80)
        print(f"Third group top 10: {third_ranks[:10]}")

        # Pool the per-window winners (up to 30 global indices).
        top_30_indices = []
        if first_ranks:
            top_30_indices.extend(first_ranks[:10])
        if second_ranks:
            top_30_indices.extend(second_ranks[:10])
        if third_ranks:
            top_30_indices.extend(third_ranks[:10])
        top_30_passages = [passages[i] for i in top_30_indices]

        # Final pass: returned positions index into top_30_indices.
        final_local_ranks = self.get_ranking_for_group(query, top_30_passages, 0)

        final_indices = []
        for rank in final_local_ranks:
            if rank < len(top_30_indices):
                final_indices.append(top_30_indices[rank])

        # Keep any pooled passages the final pass omitted, in pool order.
        remaining_top = [idx for idx in top_30_indices if idx not in final_indices]
        final_indices.extend(remaining_top)

        # Then everything that never made a window's top 10, in original order.
        all_other_indices = [i for i in range(len(passages)) if i not in top_30_indices]
        final_indices.extend(all_other_indices)

        ranking_str = " > ".join(f"[{r+1}]" for r in final_indices)
        return ranking_str
139
@dataclass
class RankingResult:
    """Result of ranking one query's candidate pool.

    Bundles the model's ranking output with the gold answer so downstream
    evaluation/serialization (see save_results) has everything it needs.
    """
    query: str                # the search query text
    correct_passage: str      # gold passage text (passages[correct_idx - 1])
    ranking: str              # raw ranking string, e.g. "[3] > [17] > ..."
    correct_idx: int          # 1-based index of the gold passage — presumably 1-based given the -1 lookup in process_query; verify against the dataset
    passages: List[str]       # all candidate passages, in original order
    ranks: List[int]          # parsed ranking as 0-based passage indices
147
+
148
class Evaluator:
    """Helpers for parsing model ranking responses."""

    @staticmethod
    def clean_ranking_response(response: str) -> List[int]:
        """Extract every run of digit characters in *response* as an int,
        in order of appearance (e.g. "[3] > [12]" -> [3, 12])."""
        numbers = []
        current = ""
        for ch in response:
            if ch.isdigit():
                current += ch
            elif current:
                numbers.append(int(current))
                current = ""
        if current:  # trailing digit run with no terminator
            numbers.append(int(current))
        return numbers
152
+
153
def process_query(row: pd.Series, reranker: GPTReranker) -> Optional[RankingResult]:
    """Run the full reranking pipeline for a single dataset row.

    Reads the query, the 1-based gold passage index, and columns
    passage_1 .. passage_100 from *row*, asks *reranker* for a ranking,
    and parses it into 0-based indices.

    Returns:
        A RankingResult, or None (after printing the error) if any step fails —
        deliberate best-effort so one bad row doesn't abort the whole run.
    """
    try:
        query = row['query']
        gold_idx = int(row['correct_passage_index'])
        passages = [row[f'passage_{n}'] for n in range(1, 101)]

        ranking_response = reranker.get_ranking(query, passages)
        # Ranking string uses 1-based numbers; convert to 0-based indices.
        zero_based_ranks = [r - 1 for r in Evaluator.clean_ranking_response(ranking_response)]

        return RankingResult(
            query=query,
            correct_passage=passages[gold_idx - 1],  # gold_idx is 1-based
            ranking=ranking_response,
            correct_idx=gold_idx,
            passages=passages,
            ranks=zero_based_ranks,
        )
    except Exception as e:
        print(f"Error processing query: {str(e)}")
        return None
173
+
174
+ def save_results(results: List[RankingResult], filename: str):
175
+ results_data = []
176
+ for result in results:
177
+ results_data.append({
178
+ 'query': result.query,
179
+ 'correct_passage': result.correct_passage,
180
+ 'ranking': result.ranking,
181
+ 'correct_idx': result.correct_idx,
182
+ 'passages': result.passages,
183
+ 'ranks': result.ranks
184
+ })
185
+
186
+ with open(filename, 'w', encoding='utf-8') as f:
187
+ json.dump(results_data, f, ensure_ascii=False, indent=2)
188
+
189
+
190
def main():
    """Rank every query in the candidate pool CSV and write results to JSON."""
    df = pd.read_csv('./datasets/QACandidate_Pool.csv')
    reranker = GPTReranker(API_KEY)
    results = []
    # total= lets tqdm render an actual progress bar instead of a bare counter.
    for _, row in tqdm(df.iterrows(), total=len(df)):
        if result := process_query(row, reranker):
            print(f"\nQuery: {result.query}")  # Show the query sentence
            print(f"Correct index: {result.correct_idx}")  # Show the correct index
            print(f"Ranks: {result.ranks[:10]}")  # Show first 10 ranks
            results.append(result)
        time.sleep(1)  # throttle between rows — assumed loop-level in the original; confirm intent

    save_results(results, 'output.json')  # Change here for your output file name
    time.sleep(1)


# BUGFIX: guard the entry point so importing this module (e.g. for its
# classes or for tests) does not kick off a full ranking run.
if __name__ == "__main__":
    main()
datasets/LongCTX_Dataset(350).csv ADDED
The diff for this file is too large to render. See raw diff
 
datasets/MixedCTX_Dataset(1386).csv ADDED
The diff for this file is too large to render. See raw diff
 
datasets/QACandidate_Pool.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f04bd606ccd1b4f4aafd8879f7e0a51f01ab0514604a820413363e0d117cb22
3
+ size 36352782