import argparse
import json
import os
from typing import Optional

import tqdm
from openai import OpenAI

# -----------------------------
#  CONFIGURATION
# -----------------------------
# Ensure this matches the model path used in your run_vllm.sh script
MODEL_NAME = "/home/mshahidul/readctrl_model/full_model/qwen3-32B_subclaims_BF16_merged"
API_URL = "http://localhost:8015/v1"
API_KEY = "EMPTY"  # The OpenAI client requires a key; any placeholder works unless the server enforces one

# Initialize Client
client = OpenAI(base_url=API_URL, api_key=API_KEY)
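
# Optional sanity check (a sketch, not part of the original flow): confirm the
# vLLM server is reachable before starting a long run. models.list() is a
# standard OpenAI SDK call that vLLM's OpenAI-compatible endpoint supports.
def check_server() -> None:
    try:
        client.models.list()
    except Exception as exc:
        raise SystemExit(f"vLLM server not reachable at {API_URL}: {exc}")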

# -----------------------------
#  SUBCLAIM EXTRACTION PROMPT
# -----------------------------
def extraction_prompt(medical_text: str) -> str:
    prompt = f"""
You are an expert medical annotator. Your task is to extract granular, factual subclaims from medical text.
A subclaim is the smallest standalone factual unit that can be independently verified.
Instructions:
1. Read the provided medical text.
2. Break it into clear, objective, atomic subclaims.
3. Each subclaim must come directly from the text.
4. Do not add, guess, or infer information.
5. Each subclaim should be short, specific, and verifiable.
6. Return ONLY a JSON list of strings, with no extra commentary.
Medical Text:
{medical_text}
Return your output in JSON list format, like:
[
  "subclaim 1",
  "subclaim 2",
  ...
]
"""
    return prompt

# -----------------------------
#  INFERENCE FUNCTION (vLLM)
# -----------------------------
def infer_subclaims(medical_text: str, temperature: float = 0.2) -> Optional[str]:
    """Send the extraction prompt to the vLLM server; return the generated text, or None on failure."""
    
    # 1. Prepare the prompt
    final_prompt = extraction_prompt(medical_text)
    
    # 2. Call the vLLM Server via OpenAI API
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "user", "content": final_prompt}
            ],
            max_tokens=1000,       # Limit generation length
            temperature=temperature,
            top_p=0.9,
            frequency_penalty=0.0,
            presence_penalty=0.0,
        )
        res = response.choices[0].message.content.strip()
        # Reasoning models such as Qwen3 may emit a <think>...</think> block;
        # keep only the text after the final closing tag.
        res = res.split("</think>")[-1].strip()
        return res
    except Exception as e:
        print(f"Error during API call: {e}")
        return None
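
# Hedged helper (a sketch, not wired into the main loop below, which stores the
# raw string): the model is asked for a JSON list, so this shows one way to
# parse the reply, falling back to the raw text if decoding fails.
def parse_subclaims(raw: Optional[str]):
    """Best-effort parse of the model output into a list of strings."""
    if raw is None:
        return None
    # Tolerate markdown code fences and a leading "json" language tag
    cleaned = raw.strip().strip("`").lstrip("json").strip()
    try:
        parsed = json.loads(cleaned)
        return parsed if isinstance(parsed, list) else raw
    except json.JSONDecodeError:
        # Fall back to the raw string so no data is silently dropped
        return raw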

# -----------------------------
#  MAIN EXECUTION
# -----------------------------
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, required=True,
                        help="Path to the input JSON file containing medical texts.")
    args = parser.parse_args()
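
    # Example invocation (the script filename here is hypothetical):
    #   python extract_subclaims.py --input_file /path/to/texts.json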

    INPUT_FILE = args.input_file
    file_name = os.path.splitext(os.path.basename(INPUT_FILE))[0]
    
    SAVE_FOLDER = "/home/mshahidul/readctrl/data/extracting_subclaim"
    os.makedirs(SAVE_FOLDER, exist_ok=True)
    OUTPUT_FILE = os.path.join(SAVE_FOLDER, f"extracted_subclaims_{file_name}.json")

    # Load input dataset
    with open(INPUT_FILE, "r") as f:
        data = json.load(f)
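
    # Expected input schema, inferred from the fields accessed below: a JSON
    # list of objects with "id", "fulltext", and optionally "summary", e.g.
    #   [{"id": "doc-001", "fulltext": "...", "summary": "..."}]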

    # Load existing results (resume mode)
    result = []
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r") as f:
            try:
                result = json.load(f)
            except json.JSONDecodeError:
                result = []
    
    existing_ids = {item["id"] for item in result}

    print(f"Starting inference on {len(data)} items using vLLM server...")
    # --------------------------------------------------------
    # PROCESS EACH MEDICAL TEXT
    # --------------------------------------------------------
    for item in tqdm.tqdm(data):
        if item["id"] in existing_ids:
            continue

        medical_text = item.get("fulltext", "")
        
        # Call the vLLM inference function
        extracted = infer_subclaims(medical_text)

        # Skip failed calls so they can be retried on a later resume run
        if extracted is None:
            continue

        result.append({
            "id": item["id"],
            "medical_text": medical_text,
            "subclaims": extracted,
            "summary": item.get("summary", "")
        })

        # Checkpoint every 20 entries so progress survives interruptions
        if len(result) % 20 == 0:
            with open(OUTPUT_FILE, "w") as f:
                json.dump(result, f, indent=4, ensure_ascii=False)

    # Final save
    with open(OUTPUT_FILE, "w") as f:
        json.dump(result, f, indent=4, ensure_ascii=False)

    print(f"Extraction completed. Saved to {OUTPUT_FILE}")