# Source: AstroVLBench / code / task5 / llm.py
# (uploaded by anonymous4ai via upload-large-folder tool, commit 325693c, verified)
#!/usr/bin/env python3
"""
Task 5: Spectral Type Classification (ASIB Benchmark)
Three questions evaluated:
Q1: Does the spectrum contain H-alpha and H-beta emission lines? (all 700 samples)
Q2: Is this a Broad-Line AGN (BLAGN)? (500 samples: groups C1-C4, D)
Q3: What is the BPT classification? (400 samples: groups C1-C4)
"""
import argparse
import base64
import csv
import json
import os
import pathlib
import re
import time
from typing import Optional, List, Dict
from dotenv import load_dotenv
load_dotenv(override=True)
from openai import OpenAI
# =========================
# CONFIGURATION
# =========================
# Root of the Task 5 data directory: ../../data/Task5_SpecType relative to this file.
DATA_DIR = pathlib.Path(__file__).resolve().parent.parent.parent / "data" / "Task5_SpecType"
# Groups eligible for each question
Q1_GROUPS = {"A", "B", "C1", "C2", "C3", "C4", "D"}  # Q1: emission-line presence (all 700 samples)
Q2_GROUPS = {"C1", "C2", "C3", "C4", "D"}  # Q2: BLAGN question (500 samples)
Q3_GROUPS = {"C1", "C2", "C3", "C4"}  # Q3: BPT classification (400 samples)
# Ground truth
# Q1/Q2 labels are the strings "True"/"False" (matching canonicalize_binary output);
# Q3 labels are the canonical BPT class names (matching canonicalize_bpt output).
Q1_TRUTH = {"A": "False", "B": "False", "C1": "True", "C2": "True", "C3": "True", "C4": "True", "D": "True"}
Q2_TRUTH = {"C1": "False", "C2": "False", "C3": "False", "C4": "False", "D": "True"}
Q3_TRUTH = {"C1": "Star-Forming", "C2": "Composite", "C3": "Seyfert", "C4": "LINER"}
# Maps a SUB_GROUP code from the CSV to its figure sub-directory under figures/.
GROUP_TO_DIR = {
    "A": "Group_A_High_Z_Trap",
    "B": "Group_B_Low_Z_Weak",
    "C1": "Group_C1_BPT_SF",
    "C2": "Group_C2_BPT_Composite",
    "C3": "Group_C3_BPT_Seyfert",
    "C4": "Group_C4_BPT_LINER",
    "D": "Group_D_Broad_Line_AGN",
}
# =========================
# CLIENT
# =========================
def get_client(model: str) -> OpenAI:
    """Create an OpenAI-compatible client based on the model name.

    Routing is keyed on case-insensitive substrings of ``model``:
    - "intern" -> INTERN_API_KEY / INTERN_BASE_URL
    - "qwen"   -> QWEN_API_KEY / QWEN_BASE_URL
    - "grok"   -> GROK_API_KEY, and GROK_BASE_URL when set
    - "claude" -> CLAUDE_API_KEY, and CLAUDE_BASE_URL when set
    - default  -> OPENAI_API_KEY / OPENAI_BASE_URL

    Requires environment variables:
    - OPENAI_API_KEY / OPENAI_BASE_URL for OpenAI/compatible models
    - CLAUDE_API_KEY for Claude models
    - GROK_API_KEY for Grok models
    - QWEN_API_KEY for Qwen models
    - INTERN_API_KEY for InternVL models
    """
    api_key = os.getenv("OPENAI_API_KEY")
    base_url = os.getenv("OPENAI_BASE_URL")
    name = model.lower()
    if "intern" in name:
        api_key = os.getenv("INTERN_API_KEY")
        base_url = os.getenv("INTERN_BASE_URL")
    elif "qwen" in name:
        api_key = os.getenv("QWEN_API_KEY")
        base_url = os.getenv("QWEN_BASE_URL")
    elif "grok" in name:
        api_key = os.getenv("GROK_API_KEY")
        # Consistency with the intern/qwen branches: honor a dedicated base URL
        # when provided; otherwise keep the OPENAI_BASE_URL value (previous behavior).
        base_url = os.getenv("GROK_BASE_URL") or base_url
    elif "claude" in name:
        api_key = os.getenv("CLAUDE_API_KEY")
        base_url = os.getenv("CLAUDE_BASE_URL") or base_url
    return OpenAI(api_key=api_key, base_url=base_url)
# =========================
# IMAGE UTILS
# =========================
def encode_image(path: pathlib.Path) -> str:
    """Read *path* as binary and return its contents as a base64 string."""
    raw = path.read_bytes()
    return base64.b64encode(raw).decode("utf-8")
# =========================
# DATA LOADING
# =========================
def load_samples(question: str, data_dir: pathlib.Path = DATA_DIR) -> List[Dict]:
    """Load eligible (image, label) samples for one question from the ASIB CSV.

    Rows whose SUB_GROUP is not eligible for *question*, or whose spectrum
    figure is missing on disk, are skipped.
    """
    csv_path = data_dir / "ASIB_v1_selection_with_snr.csv"
    figures_root = data_dir / "figures"
    eligible = {"Q1": Q1_GROUPS, "Q2": Q2_GROUPS, "Q3": Q3_GROUPS}[question]
    truth = {"Q1": Q1_TRUTH, "Q2": Q2_TRUTH, "Q3": Q3_TRUTH}[question]

    out: List[Dict] = []
    with csv_path.open(newline="") as handle:
        for row in csv.DictReader(handle):
            grp = row["SUB_GROUP"].strip()
            if grp not in eligible:
                continue
            tid = row["TARGETID"].strip()
            fig = figures_root / GROUP_TO_DIR[grp] / f"spectrum_{tid}.png"
            if not fig.exists():
                continue
            out.append(
                {
                    "target_id": tid,
                    "group": grp,
                    "label": truth[grp],
                    "image_path": fig,
                    "z_conf": float(row.get("Z_CONF", 0) or 0),
                    "class_label": row.get("CLASS_LABEL", "").strip(),
                }
            )
    return out
# =========================
# PROMPTS
# =========================
# Guided system prompt for Q1: presence of both H-alpha and H-beta emission lines.
SYSTEM_PROMPT_Q1 = """**Task:** Analyze this optical spectrum of a galaxy to determine whether the spectrum contains BOTH H-alpha (rest wavelength 6563 Angstroms) and H-beta (rest wavelength 4861 Angstroms) emission lines.
**Context:**
- The spectrum is shown in the observed frame
- H-alpha and H-beta may be redshifted out of the optical wavelength range or may be weak/non-detectable
- Emission lines appear as peaks above the continuum
**Output requirements:**
- Respond with a JSON object in the following format: {"answer": "", "reason": ""}
- The "answer" field must be either: True or False
- Answer True if both lines are present, False otherwise
- The "reason" field should contain a brief explanation
- Do not include any text outside the JSON object
"""
# Guided system prompt for Q2: Broad-Line AGN (BLAGN) identification.
SYSTEM_PROMPT_Q2 = """**Task:** Analyze this optical spectrum of a galaxy to determine whether this object is a Broad-Line AGN (BLAGN).
**Context:**
- A BLAGN is characterized by broad emission lines (FWHM > 1000 km/s), particularly in H-alpha
- The broad line component is wider than typical narrow-line regions
- Type-1 AGN / Seyfert 1 classification
- The H-alpha feature could be blended with [N II] lines which may complicate the profile
- Look for asymmetric or broadened emission line profiles in the Balmer lines (H-alpha, H-beta)
**Output requirements:**
- Respond with a JSON object in the following format: {"answer": "", "reason": ""}
- The "answer" field must be either: True or False
- Answer True if this is a BLAGN, False otherwise
- The "reason" field should contain a brief explanation
- Do not include any text outside the JSON object
"""
# Guided system prompt for Q3: four-way BPT diagram classification.
SYSTEM_PROMPT_Q3 = """**Task:** Analyze this optical spectrum of a galaxy and classify it using the BPT (Baldwin-Phillips-Terlevich) diagnostic diagram.
**Context:**
The BPT diagram uses the line ratios:
- log([NII] 6584 / H-alpha) on the x-axis
- log([OIII] 5007 / H-beta) on the y-axis
The four classification regions are:
1. Star-Forming: Low [NII]/H-alpha and low [OIII]/H-beta (ionization dominated by young stars)
2. Composite: Intermediate region between SF and AGN (mixed ionization sources)
3. Seyfert: High [OIII]/H-beta, high [NII]/H-alpha (AGN-dominated ionization)
4. LINER: High [NII]/H-alpha, low [OIII]/H-beta (low-ionization nuclear emission region)
**Output requirements:**
- Respond with a JSON object in the following format: {"answer": "", "reason": ""}
- The "answer" field must be one of: Star-Forming, Composite, Seyfert, or LINER
- The "reason" field should contain a brief explanation
- Do not include any text outside the JSON object
"""
# "Without guidance" variants: same question, no astrophysical context
# (used to measure how much the guided context helps the model).
SYSTEM_PROMPT_Q1_WOGUIDE = """Does this spectrum contain both H-alpha and H-beta emission lines?
Output requirements:
- Respond with a JSON object: {"answer": "", "reason": ""}
- The "answer" field must be either: True or False
- Do not include any text outside the JSON object
"""
SYSTEM_PROMPT_Q2_WOGUIDE = """Is this a Broad-Line AGN (BLAGN)?
Output requirements:
- Respond with a JSON object: {"answer": "", "reason": ""}
- The "answer" field must be either: True or False
- Do not include any text outside the JSON object
"""
SYSTEM_PROMPT_Q3_WOGUIDE = """Classify this spectrum using the BPT diagram: Star-Forming, Composite, Seyfert, or LINER.
Output requirements:
- Respond with a JSON object: {"answer": "", "reason": ""}
- The "answer" field must be one of: Star-Forming, Composite, Seyfert, or LINER
- Do not include any text outside the JSON object
"""
# Per-question user-turn text sent alongside the spectrum image.
USER_TEXT_Q1 = "Does this spectrum contain both H-alpha and H-beta emission lines? Respond with JSON format."
USER_TEXT_Q2 = "Is this a Broad-Line AGN (BLAGN)? Respond with JSON format."
USER_TEXT_Q3 = "Classify this spectrum: Star-Forming, Composite, Seyfert, or LINER. Respond with JSON format."
def get_prompts(question: str, prompt_type: str = "guided"):
    """Return the (system_prompt, user_text) pair for a question and prompt style.

    Any *prompt_type* other than "guided" selects the no-guidance variant.
    """
    guided = prompt_type == "guided"
    system_by_question = {
        "Q1": SYSTEM_PROMPT_Q1 if guided else SYSTEM_PROMPT_Q1_WOGUIDE,
        "Q2": SYSTEM_PROMPT_Q2 if guided else SYSTEM_PROMPT_Q2_WOGUIDE,
        "Q3": SYSTEM_PROMPT_Q3 if guided else SYSTEM_PROMPT_Q3_WOGUIDE,
    }
    user_by_question = {"Q1": USER_TEXT_Q1, "Q2": USER_TEXT_Q2, "Q3": USER_TEXT_Q3}
    return system_by_question[question], user_by_question[question]
# =========================
# MODEL CALL
# =========================
def classify_image(
    client: OpenAI,
    image_path: pathlib.Path,
    model: str,
    system_prompt: str,
    user_text: str,
    max_completion_tokens: int,
):
    """Send one spectrum image to the chat-completions API and return the response.

    Retries up to 5 attempts with exponential backoff (5s, 10s, 20s, 40s) and
    re-raises the final error if every attempt fails.
    """
    payload = [
        {"role": "system", "content": system_prompt},
        {
            "role": "user",
            "content": [
                {"type": "text", "text": user_text},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/png;base64,{encode_image(image_path)}",
                        "detail": "high",
                    },
                },
            ],
        },
    ]
    # Qwen-specific switch to disable its "thinking" mode; empty for other models.
    extra = {"enable_thinking": False} if "qwen" in model.lower() else {}
    last_attempt = 4
    for attempt in range(5):
        try:
            return client.chat.completions.create(
                model=model,
                messages=payload,
                temperature=0,
                max_completion_tokens=max_completion_tokens,
                extra_body=extra if extra else None,
            )
        except Exception as e:
            if attempt == last_attempt:
                raise
            wait = 2 ** attempt * 5
            print(f" Attempt {attempt+1} failed ({e}), retrying in {wait}s...")
            time.sleep(wait)
# =========================
# PARSE PREDICTION
# =========================
def parse_prediction(raw: str) -> dict:
    """Parse a model reply into a dict, tolerating markdown fences and chatter.

    Strategy:
    1. Strip ```json / ``` code fences and try to parse the remainder.
    2. If that fails, extract the first {...} span and try to parse it
       (handles replies that wrap the JSON object in explanatory prose).
    3. Fall back to {"answer": raw, "reason": ""} so the caller can still
       run the raw text through the canonicalizers.
    """
    cleaned = re.sub(r"```json\s*", "", raw)
    cleaned = re.sub(r"```\s*", "", cleaned)
    cleaned = cleaned.strip()
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        pass
    # Robustness: some models surround the JSON object with extra text.
    match = re.search(r"\{.*\}", cleaned, flags=re.DOTALL)
    if match:
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            pass
    return {"answer": raw, "reason": ""}
def canonicalize_binary(value) -> str:
if isinstance(value, bool):
return "True" if value else "False"
val = str(value or "").strip().upper()
if val.startswith("TRUE") or val == "T":
return "True"
if val.startswith("FALSE") or val == "F":
return "False"
if "TRUE" in val and "FALSE" not in val:
return "True"
if "FALSE" in val and "TRUE" not in val:
return "False"
return "Unknown"
def canonicalize_bpt(value: str) -> str:
    """Map a free-text BPT answer onto one of the four canonical class names."""
    text = (value or "").strip().upper()
    if text == "SF" or "STAR-FORMING" in text or "STAR FORMING" in text:
        return "Star-Forming"
    # Checked in this order; the first matching substring wins.
    for needle, canonical in (
        ("COMPOSITE", "Composite"),
        ("SEYFERT", "Seyfert"),
        ("LINER", "LINER"),
    ):
        if needle in text:
            return canonical
    return "Unknown"
def canonicalize_answer(question: str, value: str) -> str:
    """Dispatch to the binary (Q1/Q2) or BPT (Q3) canonicalizer."""
    is_binary = question in ("Q1", "Q2")
    return canonicalize_binary(value) if is_binary else canonicalize_bpt(value)
# =========================
# MAIN PIPELINE
# =========================
def run(
    question: str,
    model: str,
    limit: Optional[int],
    results_dir: pathlib.Path,
    prompt_type: str,
    max_completion_tokens: int,
    resume: bool,
    data_dir: pathlib.Path = DATA_DIR,
) -> pathlib.Path:
    """Evaluate one question over all eligible samples and save predictions.

    Sends each spectrum image to the model, canonicalizes the answer, compares
    it to the ground-truth label, and writes all records to a JSON file in
    *results_dir*. With ``resume=True`` and an existing output file, already
    scored images are skipped. Returns the output file path.
    """
    client = get_client(model)
    samples = load_samples(question, data_dir)
    print(f"Loaded {len(samples)} samples for {question}")
    results_dir.mkdir(parents=True, exist_ok=True)
    out_path = results_dir / f"predictions-{question}-{prompt_type}-{model}.json"
    system_prompt, user_text = get_prompts(question, prompt_type)
    results = []
    processed_images = set()
    if resume and out_path.exists():
        # Reload earlier predictions so already-scored images are skipped below.
        with out_path.open("r") as f:
            results = json.load(f)
        processed_images = {r["image"] for r in results}
        print(f"Resuming from {len(results)} existing predictions")
    # Running accuracy counters (seeded from resumed results; 0/0 on a fresh run).
    correct = sum(r["correct"] for r in results)
    total = len(results)
    for i, sample in enumerate(samples):
        if limit is not None and i >= limit:
            break
        image_path = sample["image_path"]
        if str(image_path) in processed_images:
            continue
        label = sample["label"]
        response = classify_image(client, image_path, model, system_prompt, user_text, max_completion_tokens)
        content = response.choices[0].message.content
        pred = parse_prediction(content)
        answer = canonicalize_answer(question, pred.get("answer", ""))
        is_correct = answer == label
        total += 1
        correct += int(is_correct)
        results.append({
            "image": str(image_path),
            "target_id": sample["target_id"],
            "group": sample["group"],
            "label": label,
            "prediction": pred,
            "correct": int(is_correct),
            # Full API response retained for later auditing/debugging.
            "raw_response": response.model_dump(),
        })
        print(
            f"{question} {sample['target_id']} (group={sample['group']}): "
            f"pred={answer} label={label} "
            f"{'✓' if is_correct else '✗'}"
        )
        # Persist after every sample so an interrupted run can --resume.
        with out_path.open("w") as f:
            json.dump(results, f, indent=2)
    if total > 0:
        print(f"{question} Accuracy on {total} checked: {correct}/{total} = {correct/total:.2%}")
    print(f"Saved predictions to {out_path}")
    return out_path
# =========================
# ARGPARSE
# =========================
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface for this script."""
    cli = argparse.ArgumentParser(
        description="Task5: Spectral Type Classification (ASIB)"
    )
    cli.add_argument("--question", required=True, choices=["Q1", "Q2", "Q3"])
    cli.add_argument("--model", default="gpt-4o")
    cli.add_argument("--prompt-type", default="guided", choices=["guided", "woguide"])
    cli.add_argument("--limit", default=None, type=int)
    cli.add_argument("--results-dir", default=pathlib.Path("./results"), type=pathlib.Path)
    cli.add_argument("--max-completion-tokens", default=16384, type=int)
    cli.add_argument("--resume", action="store_true")
    return cli.parse_args()
if __name__ == "__main__":
    # Script entry point: evaluate one question with the selected model/prompt.
    args = parse_args()
    run(
        question=args.question,
        model=args.model,
        limit=args.limit,
        results_dir=args.results_dir,
        prompt_type=args.prompt_type,
        max_completion_tokens=args.max_completion_tokens,
        resume=args.resume,
    )