|
|
import json
import multiprocessing as mp
import os
import random
import time
import zlib
from typing import Any, Dict, List, Optional, Tuple

from openai import OpenAI
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_json(file_path: str):
    """Read and parse the JSON document stored at *file_path*."""
    with open(file_path, "r", encoding="utf-8") as fh:
        return json.load(fh)
|
|
|
|
|
def append_jsonl(path: str, obj: Dict[str, Any]):
    """Append *obj* as one JSON line to *path*, creating parent dirs as needed.

    Fix: ``os.path.dirname(path)`` is ``""`` for a bare filename, and
    ``os.makedirs("")`` raises FileNotFoundError — guard against that case.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(obj, ensure_ascii=False) + "\n")
|
|
|
|
|
def load_done_keys(jsonl_path: str):
    """Collect the set of "key" values already present in a JSONL results file.

    A missing file yields an empty set; blank or malformed lines are skipped,
    so a partially written output can be resumed safely.
    """
    if not os.path.exists(jsonl_path):
        return set()
    keys = set()
    with open(jsonl_path, "r", encoding="utf-8") as fh:
        for raw in fh:
            raw = raw.strip()
            if not raw:
                continue
            # Non-dict JSON (e.g. a bare number) raises on .get — skip it too.
            try:
                key = json.loads(raw).get("key")
            except Exception:
                continue
            if key is not None:
                keys.add(key)
    return keys
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def safe_json_parse(text: str) -> Optional[Dict[str, Any]]:
    """Best-effort JSON parse of a model reply; returns None on failure.

    Strips a surrounding markdown code fence (``` or ```json) before parsing.
    """
    cleaned = (text or "").strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.strip("`").strip()
        # Drop a leading "json" language tag left over from the fence.
        if cleaned.lower().startswith("json"):
            cleaned = cleaned[4:].strip()
    try:
        return json.loads(cleaned)
    except Exception:
        return None
|
|
|
|
|
def call_chat_json(
    client: OpenAI,
    model: str,
    system: str,
    user: str,
    max_tokens: int,
    temperature: float,
    top_p: float = 0.9,
    max_retries: int = 4,
) -> Dict[str, Any]:
    """Call the chat-completions endpoint and parse the reply as JSON.

    Retries on any failure (transport error or unparseable reply) with linear
    backoff. Fix: the original also slept after the *final* failed attempt,
    delaying the RuntimeError for no benefit.

    Raises:
        RuntimeError: if all *max_retries* attempts fail.
    """
    last_err: Optional[str] = None
    for attempt in range(1, max_retries + 1):
        try:
            resp = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user},
                ],
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
            )
            text = resp.choices[0].message.content
            obj = safe_json_parse(text)
            if obj is None:
                raise ValueError(f"JSON parse failed. Raw: {text[:200]}...")
            return obj
        except Exception as e:
            last_err = str(e)
            # Linear backoff; skip the sleep once there is nothing left to retry.
            if attempt < max_retries:
                time.sleep(1.0 * attempt)
    raise RuntimeError(f"call_chat_json failed after {max_retries} retries. Last error: {last_err}")
|
|
|
|
|
def normalize_text(s: str) -> str:
    """Lowercase *s* and collapse every whitespace run into a single space."""
    if not s:
        return ""
    return " ".join(s.lower().split())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Normalized answers that mark a QA pair as binary (yes/no style).
_BINARY_ANS = {"yes", "no", "true", "false"}

# Auxiliary verbs that typically open an English yes/no question
# ("Is the chair red?", "Can you see the door?", ...).
_AUX_START = {
    "is", "are", "was", "were",
    "do", "does", "did",
    "can", "could",
    "will", "would", "should",
    "has", "have", "had",
    "may", "might", "must",
}
|
|
|
|
|
def is_binary_qa(question: str, gt_answer: str) -> bool:
    """Heuristically decide whether a QA pair is a yes/no (binary) question.

    True when the ground-truth answer is a binary word, or the question ends
    with "?" and opens with an auxiliary verb (is/are/do/can/...).
    """
    if normalize_text(gt_answer) in _BINARY_ANS:
        return True
    q = (question or "").strip().lower()
    if not q.endswith("?"):
        return False
    words = q.split()
    return bool(words) and words[0] in _AUX_START
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# System prompt: fuse situation + QA pair into one correct declarative
# sentence, returned as strict JSON. Passed verbatim (never str.format-ed).
SYSTEM_STATEMENT = r"""
You generate ONE correct answer sentence given a situation and a QA pair.

CRITICAL:
- The output must be a single, continuous sentence that integrates the situation text with a short answer clause that resolves the question (e.g., “When …, …”).

Return STRICT JSON: {"statement": "..."} only.

Rules:
- Must be accurate given the answer.
- Natural, concise, factual.
- Preserve referenced entity labels if any (e.g., "chair A").
- JSON only.
"""

# User prompt template; filled via str.format in gen_statement().
USER_STATEMENT = """Situation: {situation}
Question: {question}
Ground-truth answer: {answer}

Return JSON only.
"""
|
|
|
|
|
def gen_statement(client: OpenAI, model: str, situation: str, question: str, answer: str) -> str:
    """Ask the model to fold the situation and QA pair into one correct sentence.

    Raises:
        ValueError: if the model reply lacks a non-empty "statement" string.
    """
    prompt = USER_STATEMENT.format(situation=situation, question=question, answer=answer)
    result = call_chat_json(
        client=client,
        model=model,
        system=SYSTEM_STATEMENT,
        user=prompt,
        max_tokens=512,
        temperature=0.2,
    )
    statement = result.get("statement", "")
    if isinstance(statement, str) and statement.strip():
        return statement.strip()
    raise ValueError(f"Bad statement: {result}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# System prompt for generating 9 distractors.
# NOTE: this string is passed verbatim to the API (never through str.format —
# see gen_wrong_sentences), so JSON braces must be written singly; the previous
# {{ }} escaping showed literal doubled braces to the model. Also fixed the
# "MUST integrates" grammar slip.
SYSTEM_WRONG_9 = r"""
You generate distractors for a ground-truth statement.

CRITICAL:
- EVERY sentence MUST integrate the situation text with a wrong answer clause (e.g., “When …, …”).

OUTPUT (STRICT JSON ONLY):
{"distractors": ["<s1>", "<s2>", "<s3>", "<s4>", "<s5>", "<s6>", "<s7>", "<s8>", "<s9>"]}

Rules:
- The situation should be fixed in each sentence.
- Exactly 9 strings, all unique.
- Each is a single declarative sentence.
- Each must be WRONG given the ground-truth answer.
- Preserve the same main subject/entity (same instance label if present).
- The distractors should be diverse, non-ambiguous, realistic. The difference between ground-truth and distractors should be VERY clear.
- No markdown, no extra keys.
"""
|
|
|
|
|
# User prompt template for 9 distractors; filled via str.format in
# gen_wrong_sentences(), so literal JSON braces are escaped as {{ }}.
USER_WRONG_9 = """Situation: {situation}
Question: {question}
Ground-truth answer: {answer}
Correct statement: {correct_statement}

TASK
Generate 9 WRONG but plausible alternative declarative statements.

Return JSON only:
{{"distractors": ["...","...","...","...","...","...","...","...","..."]}}
"""
|
|
|
|
|
# System prompt for generating a single distractor (binary yes/no questions).
# NOTE: passed verbatim to the API (never through str.format), so JSON braces
# must be single; the previous {{ }} escaping showed literal doubled braces to
# the model. Also fixed the "MUST integrates" grammar slip.
SYSTEM_WRONG_1 = r"""
You generate distractors for a ground-truth statement.

CRITICAL:
- EVERY sentence MUST integrate the situation text with a wrong answer clause (e.g., “When …, …”).

OUTPUT (STRICT JSON ONLY):
{"distractors": ["<s1>"]}

Rules:
- The situation should be fixed in the sentence.
- Exactly 1 string.
- The sentence must be WRONG given the ground-truth answer.
- Preserve the same main subject/entity (same instance label if present).
- Make the wrong answer VERY clear (not ambiguous).
- No markdown, no extra keys.
"""
|
|
|
|
|
# User prompt template for 1 distractor; filled via str.format in
# gen_wrong_sentences(), so literal JSON braces are escaped as {{ }}.
USER_WRONG_1 = """Situation: {situation}
Question: {question}
Ground-truth answer: {answer}
Correct statement: {correct_statement}

TASK
Generate 1 WRONG but plausible alternative declarative statement.

Return JSON only:
{{"distractors": ["..."]}}
"""
|
|
|
|
|
def _add_unique(collected: List[str], seen: set, candidates: List[Any], situation: str) -> None:
    """Append candidate strings to *collected*, deduplicating in place.

    Only non-empty strings are kept; duplicates are detected on the
    case/whitespace-normalized form, which is recorded in *seen*.

    Args:
        collected: output list of accepted sentences, mutated in place.
        seen: normalized sentences already taken (mutated in place).
        candidates: raw model output; non-string entries are ignored.
        situation: unused; kept for interface compatibility with callers.

    Fix: removed the dead no-op assignment (``x = x``) from the original.
    """
    for cand in candidates:
        if not isinstance(cand, str):
            continue
        cand = cand.strip()
        if not cand:
            continue
        norm = normalize_text(cand)
        if norm in seen:
            continue
        seen.add(norm)
        collected.append(cand)
|
|
|
|
|
def gen_wrong_sentences(
    client: OpenAI,
    model: str,
    situation: str,
    question: str,
    answer: str,
    correct_statement: str,
    n: int = 9,
    batch_rounds: int = 5,
) -> List[str]:
    """Generate *n* unique wrong-but-plausible statements for an MCQ.

    Queries the model up to *batch_rounds* times at high temperature,
    accumulating unique distractors across rounds until *n* are collected.

    Fixes vs. the original:
    - ``seen``/``out`` were reset every round, discarding valid distractors
      gathered in earlier rounds; they now accumulate (via ``_add_unique``).
    - Empty-string distractors were previously accepted once; ``_add_unique``
      filters them out.

    Raises:
        ValueError: if fewer than *n* unique distractors were collected.
    """
    # Binary questions need a single distractor; open ones need a batch of 9.
    if n == 1:
        system, user_tmpl, max_tokens = SYSTEM_WRONG_1, USER_WRONG_1, 256
    else:
        system, user_tmpl, max_tokens = SYSTEM_WRONG_9, USER_WRONG_9, 1024

    # Seed the dedup set with the correct statement so the model can never
    # sneak the ground truth in as a "distractor".
    seen = {normalize_text(correct_statement)}
    out: List[str] = []

    for _ in range(batch_rounds):
        obj = call_chat_json(
            client=client,
            model=model,
            system=system,
            user=user_tmpl.format(
                situation=situation,
                question=question,
                answer=answer,
                correct_statement=correct_statement,
            ),
            max_tokens=max_tokens,
            temperature=0.95,  # high temperature for diverse distractors
        )

        ds = obj.get("distractors", None)
        if not isinstance(ds, list):
            continue

        _add_unique(out, seen, ds, situation)
        if len(out) >= n:
            return out[:n]

    raise ValueError(f"Could not collect {n} unique distractors in {batch_rounds} batch rounds.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def build_mcq(correct: str, wrongs: List[str], seed: Optional[int] = None):
    """Shuffle correct + wrong options; return (options, index_of_correct).

    A non-None *seed* makes the shuffle deterministic via a dedicated
    random.Random instance; otherwise the module-level RNG is used.
    """
    options = [correct, *wrongs]
    if seed is None:
        random.shuffle(options)
    else:
        random.Random(seed).shuffle(options)
    return options, options.index(correct)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Per-process singletons for pool workers, populated by _init_worker().
# Each spawned worker builds its own OpenAI client (clients are not shared
# across processes).
_WORKER_CLIENT = None
_WORKER_MODEL = None
|
|
|
|
|
def _init_worker(base_url: str, model_name: str, timeout: int = 3600):
    """Pool initializer: build this worker's OpenAI client and record the model."""
    global _WORKER_CLIENT, _WORKER_MODEL
    _WORKER_CLIENT = OpenAI(api_key="EMPTY", base_url=base_url, timeout=timeout)
    _WORKER_MODEL = model_name
|
|
|
|
|
def _process_one(args: Tuple[str, int, str, str, str, str]) -> Dict[str, Any]:
    """Worker: build one MCQ record from (key, idx, scene_id, situation, question, gt_answer).

    Returns {"ok": True, "out": record} on success or {"ok": False, "err": info}
    on failure, so the parent process can route results to the output / error
    JSONL files without losing items.
    """
    key, idx, scene_id, situation, question, gt_answer = args
    try:
        correct_stmt = gen_statement(_WORKER_CLIENT, _WORKER_MODEL, situation, question, gt_answer)

        # Binary (yes/no) questions get 1 distractor; open questions get 9.
        n_wrong = 1 if is_binary_qa(question, gt_answer) else 9
        wrongs = gen_wrong_sentences(
            _WORKER_CLIENT,
            _WORKER_MODEL,
            situation,
            question,
            gt_answer,
            correct_stmt,
            n=n_wrong,
            batch_rounds=6 if n_wrong == 9 else 4,
        )

        # Fix: the original used abs(hash(key)), but builtin hash() of a str is
        # randomized per process (PYTHONHASHSEED), so option order was not
        # reproducible across runs/workers. crc32 is stable and already yields
        # a value in [0, 2**32).
        seed = zlib.crc32(key.encode("utf-8"))
        options, label = build_mcq(correct_stmt, wrongs, seed=seed)

        out = {
            "key": key,
            "scene_id": scene_id,
            "situation": situation,
            "question": question,
            "gt_answer": gt_answer,
            "correct": correct_stmt,
            "options": options,
            "label": label,
            "is_binary": (n_wrong == 1),
        }
        return {"ok": True, "out": out}

    except Exception as e:
        return {"ok": False, "err": {
            "key": key,
            "scene_id": scene_id,
            "situation": situation,
            "question": question,
            "gt_answer": gt_answer,
            "error": str(e),
        }}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Build sentence-level MCQ data for SQA3D test questions.

    Loads the balanced SQA question/annotation JSON, skips items whose key is
    already in the output JSONL (resume support), then fans the remaining
    items out to a spawn-based process pool. Successes and failures are
    appended to separate JSONL files as results arrive.
    """
    # OpenAI-compatible endpoint serving the generator model.
    base_url = "http://lrc-alpha-sg-gpu06:22002/v1"
    model_name = "Qwen/Qwen3-VL-8B-Instruct"

    # NOTE(review): hard-coded cluster paths — consider CLI args or env vars.
    in_path = "/home/m50048399/transfered/ye_project/PointMapVerse/existing_datasets/ScanNet/annotations/sqa_task/balanced/v1_balanced_questions_test_scannetv2.json"
    ann_path = "/home/m50048399/transfered/ye_project/PointMapVerse/existing_datasets/ScanNet/annotations/sqa_task/balanced/v1_balanced_sqa_annotations_test_scannetv2.json"

    out_jsonl = "/home/m50048399/transfered/ye_project/PointMapVerse/derived/sqa_sentence_mcq_test_scannetv2.jsonl"
    err_jsonl = out_jsonl + ".errors.jsonl"

    qdata = load_json(in_path)["questions"]
    adata = load_json(ann_path)["annotations"]

    # Keys already written to the output file; enables resuming a crashed run.
    done = load_done_keys(out_jsonl)
    print(f"Loaded {len(qdata)} items. Already done: {len(done)}")

    tasks = []
    for idx, item in enumerate(qdata):
        scene_id = item.get("scene_id", "")
        question = (item.get("question") or "").strip()
        situation = (item.get("situation") or "").strip()

        # First listed answer is taken as ground truth.
        # assumes adata[idx] aligns with qdata[idx] — TODO confirm alignment.
        gt_answer = ""
        try:
            gt_answer = (adata[idx]["answers"][0]["answer"] or "").strip()
        except Exception:
            gt_answer = ""

        # Skip items missing any of the three required text fields.
        if not situation or not question or not gt_answer:
            continue

        key = f"{scene_id}::{idx}"
        if key in done:
            continue

        tasks.append((key, idx, scene_id, situation, question, gt_answer))

    print(f"To process: {len(tasks)}")

    # "spawn" so each worker cleanly builds its own client in _init_worker;
    # maxtasksperchild recycles workers to bound any per-process leaks.
    ctx = mp.get_context("spawn")
    with ctx.Pool(
        processes=8,
        initializer=_init_worker,
        initargs=(base_url, model_name, 3600),
        maxtasksperchild=50,
    ) as pool:
        processed = 0
        ok_cnt = 0
        err_cnt = 0

        # imap_unordered: append each result as soon as any worker finishes.
        for res in pool.imap_unordered(_process_one, tasks, chunksize=4):
            processed += 1
            if res["ok"]:
                append_jsonl(out_jsonl, res["out"])
                ok_cnt += 1
            else:
                append_jsonl(err_jsonl, res["err"])
                err_cnt += 1

            if processed % 100 == 0:
                print(f"Finished {processed}/{len(tasks)} | ok={ok_cnt} err={err_cnt}")

    print("Done.")
|
|
|
|
|
# Guard is required: the "spawn" start method re-imports this module in every
# worker process, which must not re-enter main().
if __name__ == "__main__":
    main()
|
|
|