File size: 2,550 Bytes
030876e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
from openai import OpenAI
import json, os
import tqdm

# Load prompt template (v3) with INPUT_TEXT placeholder
with open("/home/mshahidul/readctrl/prompts/syn_dataset_subclaims_support_check_v3.txt", "r") as f:
    prompt_template = f.read()

# Load translated source articles that will be plugged into the prompt.
# NOTE(review): items are assumed to be dicts with at least "id" and
# "translated_fulltext" keys (see the consuming loop below) — confirm
# against the file that produced this JSON.
source_path = "/home/mshahidul/readctrl/data/translated_data/multiclinsum_gs_train_en2bn_gemma_(0-200).json"
with open(source_path, "r") as f:
    source_data = json.load(f)

# API credentials live in an external JSON file keyed by provider name,
# so the key never appears in the source tree.
api_file = "/home/mshahidul/api_new.json"
with open(api_file, "r") as f:
    api_keys = json.load(f)
openai_api_key = api_keys["openai"]

# Single shared client reused for every request in this run.
client = OpenAI(api_key=openai_api_key)


def openai_return(prompt, model="gpt-5"):
    """Send *prompt* to the chat-completions API and parse the reply as JSON.

    Parameters
    ----------
    prompt : str
        Fully rendered prompt text, sent as the user message.
    model : str
        Chat model name (default ``"gpt-5"``).

    Returns
    -------
    dict | list | str
        The parsed JSON payload when the reply is valid JSON after stripping
        Markdown ```json fences; otherwise the cleaned raw text is returned
        so the caller can store and inspect it later.
    """
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
    )
    # FIX: message.content can be None (e.g. refusals / content-filtered
    # replies); the original .strip() on it raised AttributeError.
    content = (response.choices[0].message.content or "").strip()
    # Models often wrap JSON in Markdown fences — strip them before parsing.
    cleaned = content.replace("```json", "").replace("```", "").strip()
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        # Deliberate best-effort: keep the raw text rather than crash the run.
        print("⚠️ JSON parse failed — storing raw text.")
        return cleaned


# Save path for the new dataset generated from translated_fulltext
save_dir = "/home/mshahidul/readctrl/data/finetuning_data/new_v2"
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "finetune_dataset_subclaim_support_bn.json")

# Resume support: reload any previously saved results.
res = []
if os.path.exists(save_path):
    with open(save_path, "r") as f:
        res = json.load(f)

# FIX: the old resume logic used start_idx = len(res), but items whose
# "translated_fulltext" is empty are skipped without being appended to res,
# so len(res) undercounts consumed source items and a restart would process
# the wrong slice (duplicating or dropping articles). Resume by id instead.
processed_ids = {entry.get("id") for entry in res}

for item in tqdm.tqdm(source_data):
    item_id = item.get("id")
    if item_id is not None and item_id in processed_ids:
        continue  # already annotated in a previous run

    # FIX: guard against the field being present but null, which would
    # have crashed the original's .strip().
    input_text = (item.get("translated_fulltext") or "").strip()
    if not input_text:
        continue  # nothing to annotate

    # Fill the prompt template with the current article text
    prompt = prompt_template.replace("{{INPUT_TEXT}}", input_text)
    model_output = openai_return(prompt, model="gpt-5")

    res.append(
        {
            "id": item_id,
            "input_text": input_text,
            "model_output": model_output,
        }
    )

    # Checkpoint every 2 new samples so a crash loses little work.
    if len(res) % 2 == 0:
        with open(save_path, "w") as f:
            json.dump(res, f, indent=2, ensure_ascii=False)
        print(f"Saved {len(res)} samples so far.")

# Final flush so odd-length result lists are persisted too.
with open(save_path, "w") as f:
    json.dump(res, f, indent=2, ensure_ascii=False)