# AstroVLBench / code / task4 / llm.py
# Provenance: uploaded by anonymous4ai via the upload-large-folder tool (commit 325693c, verified).
#!/usr/bin/env python3
"""
Task 4: Light Curve Classification (AGN / SNIa / TDE / RRL / Mira)
Classifies astronomical transients and variables from light curve images
or text-based multi-band flux data.
"""
import argparse
import base64
import csv
import json
import os
import pathlib
import re
import time
from typing import Optional
from dotenv import load_dotenv
load_dotenv(override=True)
from openai import OpenAI
# =========================
# CONFIGURATION
# =========================
# Dataset root: ../../data/Task4_LightCurve relative to this file, holding
# the figures/ (PNG) and csv/ (flux table) subtrees.
DATA_DIR = pathlib.Path(__file__).resolve().parent.parent.parent / "data" / "Task4_LightCurve"
# The five target classes, spelled exactly as used for labels and scoring.
CLASSES = ["AGN", "SNIa", "TDE", "RRL", "Mira"]
# Passband index -> LSST filter name and central wavelength (nm)
PASSBAND_MAP = {
    0: ("u", 365),
    1: ("g", 480),
    2: ("r", 620),
    3: ("i", 750),
    4: ("z", 870),
    5: ("y", 1000),
}
# =========================
# CLIENT
# =========================
def get_client(model: str) -> OpenAI:
    """Create an OpenAI-compatible client based on the model name.

    The provider is chosen by case-insensitive substring match on *model*.
    Requires environment variables:
    - OPENAI_API_KEY / OPENAI_BASE_URL for OpenAI/compatible models
    - CLAUDE_API_KEY (optionally CLAUDE_BASE_URL) for Claude models
    - GROK_API_KEY (optionally GROK_BASE_URL) for Grok models
    - QWEN_API_KEY / QWEN_BASE_URL for Qwen models
    - INTERN_API_KEY / INTERN_BASE_URL for InternVL models
    """
    # Generic OpenAI-compatible credentials are the default.
    api_key = os.getenv("OPENAI_API_KEY")
    base_url = os.getenv("OPENAI_BASE_URL")
    name = model.lower()
    if "intern" in name:
        api_key = os.getenv("INTERN_API_KEY")
        base_url = os.getenv("INTERN_BASE_URL")
    elif "qwen" in name:
        api_key = os.getenv("QWEN_API_KEY")
        base_url = os.getenv("QWEN_BASE_URL")
    elif "grok" in name:
        api_key = os.getenv("GROK_API_KEY")
        # Prefer a Grok-specific endpoint; fall back to OPENAI_BASE_URL when
        # unset so existing deployments keep working unchanged.
        base_url = os.getenv("GROK_BASE_URL") or base_url
    elif "claude" in name:
        api_key = os.getenv("CLAUDE_API_KEY")
        # Same backward-compatible fallback as for Grok.
        base_url = os.getenv("CLAUDE_BASE_URL") or base_url
    return OpenAI(api_key=api_key, base_url=base_url)
# =========================
# IMAGE UTILS
# =========================
def encode_image(path: pathlib.Path) -> str:
    """Return the base64-encoded contents of the file at *path* as UTF-8 text."""
    with open(path, "rb") as handle:
        payload = handle.read()
    return base64.b64encode(payload).decode("utf-8")
# =========================
# DATA LOADING
# =========================
def load_catalog_image(data_dir: pathlib.Path = DATA_DIR):
    """Collect labelled figure samples: one entry per PNG under figures/<class>/.

    Each sample dict carries the object id (file stem), the class label, and
    the figure path. Classes with no figures directory are skipped.
    """
    catalog = []
    for label in CLASSES:
        class_dir = data_dir / "figures" / label
        if not class_dir.exists():
            continue
        catalog.extend(
            {"oid": png.stem, "label": label, "image_path": png}
            for png in sorted(class_dir.glob("*.png"))
        )
    return catalog
def load_catalog_text(data_dir: pathlib.Path = DATA_DIR):
    """Collect labelled CSV samples: one entry per file under csv/<class>/.

    Each sample dict carries the object id (file stem), the class label, and
    the CSV path. Classes with no csv directory are skipped.
    """
    catalog = []
    for label in CLASSES:
        class_dir = data_dir / "csv" / label
        if not class_dir.exists():
            continue
        for table in sorted(class_dir.glob("*.csv")):
            catalog.append({"oid": table.stem, "label": label, "csv_path": table})
    return catalog
def format_lightcurve(csv_path: pathlib.Path) -> str:
    """Render a per-band, time-sorted text summary of a light-curve CSV.

    Expects columns ``passband``, ``mjd``, ``flux``, ``flux_err`` and
    optionally ``detected_bool`` (defaults to detected). At most 50
    observations per band are listed; extras are summarized with a count.
    Malformed rows are silently skipped.
    """
    with csv_path.open(newline="") as handle:
        records = list(csv.DictReader(handle))
    if not records:
        return "No data available."
    # Bucket observations by integer passband index.
    grouped: dict[int, list] = {}
    for record in records:
        try:
            band = int(float(record["passband"]))
            point = (
                float(record["mjd"]),
                float(record["flux"]),
                float(record["flux_err"]),
                int(float(record.get("detected_bool", 1))),
            )
        except (ValueError, KeyError):
            continue  # skip rows with missing or non-numeric fields
        grouped.setdefault(band, []).append(point)
    out = []
    for band in sorted(grouped):
        band_name, wave_nm = PASSBAND_MAP.get(band, (f"band{band}", 0))
        points = sorted(grouped[band], key=lambda p: p[0])
        out.append(f" {band_name}-band ({wave_nm} nm), {len(points)} observations:")
        for mjd, flux, flux_err, detected in points[:50]:
            det_str = "" if detected else " [non-detection]"
            out.append(f" MJD={mjd:.4f} flux={flux:.3f} ± {flux_err:.3f}{det_str}")
        if len(points) > 50:
            out.append(f" ... ({len(points) - 50} more observations)")
    return "\n".join(out)
# =========================
# PROMPTS
# =========================
# Guided image prompt: describes each class's light-curve morphology so the
# model can match visual features to physics.
SYSTEM_PROMPT_IMAGE = """You are an expert astrophysicist classifying astronomical transients and variables based on their light curve morphology.
Classify the source into exactly one of these 5 categories:
**AGN** — Stochastic, red-noise variability driven by accretion disk fluctuations around a supermassive black hole. Continuous process with no defined periodicity, persisting over long epochs.
**SNIa** — Thermonuclear explosion of a white dwarf. Fast rise to peak brightness followed by a characteristic exponential decline powered by radioactive decay (weeks timescale). A one-time event.
**TDE** — Tidal Disruption Event: a star disrupted by a supermassive black hole. Rapid rise followed by a smooth power-law decline as stellar debris falls back. Sustained high-temperature emission.
**RRL** — RR Lyrae: short-period pulsating star with a period of hours to ~1 day. Strictly periodic, sawtooth-like light curve with rapid rise and slow decline.
**Mira** — Long-period pulsating AGB star. Periodic variability over months to years, with large amplitude and smooth sinusoidal-like variations.
**Output requirements:**
- Respond with a JSON object: {"answer": "", "reason": ""}
- The "answer" field must be exactly one of: AGN, SNIa, TDE, RRL, Mira
- The "reason" field should contain a brief explanation of your classification decision
- Do not include any text outside the JSON object
"""
# Ablation prompt (--prompt-type woguide): label set only, no class guidance.
SYSTEM_PROMPT_IMAGE_WOGUIDE = """Classify this light curve into one of: AGN, SNIa, TDE, RRL, Mira.
Output requirements:
- Respond with a JSON object: {"answer": "", "reason": ""}
- The "answer" field must be exactly one of: AGN, SNIa, TDE, RRL, Mira
- The "reason" field should contain a brief explanation of your classification decision
- Do not include any text outside the JSON object
"""
# Text-modality system prompt: classification from raw multi-band flux tables.
SYSTEM_PROMPT_TEXT = """You are an expert astrophysicist classifying astronomical transients and variables based solely on their underlying physical processes.
You will be given multi-band observational flux data (flux in arbitrary units vs. Modified Julian Date). Classify the target into exactly one of the 5 categories below. Evaluate the implied timescales, periodicity, event continuity, and behavior across wavelengths. Base your answer on physical deduction alone.
**AGN** — Stochastic, red-noise variability driven by accretion disk fluctuations around a supermassive black hole. Continuous process lacking any defined global periodicity, persisting over extremely long epochs.
**SNIa** — Thermonuclear explosion of a white dwarf at the Chandrasekhar mass limit. Powered by radioactive decay (⁵⁶Ni → ⁵⁶Co → ⁵⁶Fe). Rapid rise to peak, then characteristic exponential decline over weeks. A single, non-repeating event.
**TDE** — Tidal Disruption Event: a star disrupted by a dormant supermassive black hole. Steady fallback of stellar debris forms a temporary accretion disk. Rapid rise followed by a smooth power-law decline; sustained high-temperature emission.
**RRL** — RR Lyrae: low-mass horizontal branch star with rapid radial pulsations. Strictly periodic, with period of hours to ~1 day. Sawtooth-like profile with fast rise and slow decline.
**Mira** — Asymptotic Giant Branch star undergoing fundamental-mode radial pulsations. Large-amplitude, long-period variability spanning months to years. Smooth, quasi-sinusoidal variations.
**Output requirements:**
- Respond with a JSON object: {"answer": "", "reason": ""}
- The "answer" field must be exactly one of: AGN, SNIa, TDE, RRL, Mira
- The "reason" field should contain a brief explanation based on the light curve properties
- Do not include any text outside the JSON object
"""
# User-turn text that accompanies every image request.
USER_TEXT_IMAGE = "Classify this light curve: AGN, SNIa, TDE, RRL, or Mira. Respond with JSON format."
# =========================
# MODEL CALLS
# =========================
def classify_image(client: OpenAI, image_path: pathlib.Path, model: str, system_prompt: str, max_completion_tokens: int):
    """Send one light-curve figure to the model and return the raw API response.

    The image is inlined as a base64 data URL with "high" detail. Up to 5
    attempts are made with exponential backoff (5s, 10s, 20s, 40s); the
    final failure is re-raised.
    """
    encoded = encode_image(image_path)
    image_part = {
        "type": "image_url",
        "image_url": {
            "url": f"data:image/png;base64,{encoded}",
            "detail": "high",
        },
    }
    conversation = [
        {"role": "system", "content": system_prompt},
        {
            "role": "user",
            "content": [{"type": "text", "text": USER_TEXT_IMAGE}, image_part],
        },
    ]
    # Qwen endpoints need thinking disabled explicitly; others take no extras.
    extra = {"enable_thinking": False} if "qwen" in model.lower() else {}
    for attempt in range(5):
        try:
            return client.chat.completions.create(
                model=model,
                messages=conversation,
                temperature=0,
                max_completion_tokens=max_completion_tokens,
                extra_body=extra if extra else None,
            )
        except Exception as e:
            if attempt >= 4:
                raise
            wait = 2 ** attempt * 5
            print(f" Attempt {attempt+1} failed ({e}), retrying in {wait}s...")
            time.sleep(wait)
def classify_text(client: OpenAI, user_prompt: str, model: str, max_completion_tokens: int):
    """Send a text-formatted light curve to the model and return the raw response.

    Uses SYSTEM_PROMPT_TEXT as the system turn. Up to 5 attempts are made
    with exponential backoff (5s, 10s, 20s, 40s); the final failure is
    re-raised.
    """
    conversation = [
        {"role": "system", "content": SYSTEM_PROMPT_TEXT},
        {"role": "user", "content": user_prompt},
    ]
    # Qwen endpoints need thinking disabled explicitly; others take no extras.
    extra = {"enable_thinking": False} if "qwen" in model.lower() else {}
    for attempt in range(5):
        try:
            return client.chat.completions.create(
                model=model,
                messages=conversation,
                temperature=0,
                max_completion_tokens=max_completion_tokens,
                extra_body=extra if extra else None,
            )
        except Exception as e:
            if attempt >= 4:
                raise
            wait = 2 ** attempt * 5
            print(f" Attempt {attempt+1} failed ({e}), retrying in {wait}s...")
            time.sleep(wait)
# =========================
# PARSE PREDICTION
# =========================
def parse_prediction(raw: str) -> dict:
    """Parse a model reply into a dict with "answer" and "reason" keys.

    Strips Markdown code fences, then tries JSON decoding. Falls back to
    wrapping the raw text as the answer when the reply is not a JSON
    *object* — including valid JSON that is not a dict (a bare string or
    number), which previously leaked through and broke callers that use
    ``pred.get``.
    """
    cleaned = re.sub(r"```json\s*", "", raw)
    cleaned = re.sub(r"```\s*", "", cleaned)
    cleaned = cleaned.strip()
    try:
        parsed = json.loads(cleaned)
    except json.JSONDecodeError:
        return {"answer": raw, "reason": ""}
    # json.loads accepts scalars too; only a dict satisfies the contract.
    if isinstance(parsed, dict):
        return parsed
    return {"answer": raw, "reason": ""}
def canonicalize_label(value: str) -> str:
    """Map a free-form class answer onto one of the five canonical labels.

    Matching is case-insensitive and substring-based; earlier rules win
    (AGN first). A bare "SN" also counts as SNIa. Returns "Unknown" when
    nothing matches; None is treated as empty.
    """
    text = (value or "").strip().upper()
    rules = (
        ("AGN", ("AGN",)),
        ("SNIa", ("SNIA", "SN IA", "TYPE IA")),
        ("TDE", ("TDE", "TIDAL")),
        ("RRL", ("RRL", "RR LYR")),
        ("Mira", ("MIRA",)),
    )
    for label, needles in rules:
        if any(needle in text for needle in needles):
            return label
    # An exact "SN" cannot also match any rule above, so checking it last
    # preserves the original precedence.
    if text == "SN":
        return "SNIa"
    return "Unknown"
# =========================
# MAIN PIPELINE
# =========================
def run(
    model: str,
    limit: Optional[int],
    results_dir: pathlib.Path,
    modality: str,
    prompt_type: str,
    max_completion_tokens: int,
    resume: bool,
    data_dir: pathlib.Path = DATA_DIR,
) -> pathlib.Path:
    """Run the full classification pipeline and return the predictions path.

    Loads the image or text catalog from *data_dir*, queries *model* for
    each sample, scores the canonicalized answer against the true label,
    and checkpoints all predictions to JSON after every sample.

    Args:
        model: model identifier passed to the chat-completions API.
        limit: stop after this many catalog positions (None = no limit).
        results_dir: output directory for the predictions JSON (created
            if missing).
        modality: "image" sends PNG figures; any other value sends
            formatted CSV flux data.
        prompt_type: "guided" selects the class-description image prompt,
            otherwise the minimal "woguide" prompt (image modality only —
            text modality always uses SYSTEM_PROMPT_TEXT).
        max_completion_tokens: per-request completion-token cap.
        resume: reuse predictions already present in the output file.
        data_dir: dataset root containing figures/ and csv/ subtrees.
    """
    client = get_client(model)
    if modality == "image":
        samples = load_catalog_image(data_dir)
    else:
        samples = load_catalog_text(data_dir)
    print(f"Loaded {len(samples)} samples")
    results_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): a model name containing "/" would produce an invalid
    # filename here — confirm model ids are plain names.
    out_path = results_dir / f"predictions-{modality}-{prompt_type}-{model}.json"
    results = []
    processed_ids = set()
    if resume and out_path.exists():
        with out_path.open("r") as f:
            results = json.load(f)
        # Older result files may key samples by "image" instead of "oid".
        processed_ids = {r.get("oid") or r.get("image") for r in results}
        print(f"Resuming from {len(results)} existing predictions")
    # Running tallies seed from resumed results (zero on a fresh run).
    correct = sum(r["correct"] for r in results)
    total = len(results)
    if prompt_type == "guided":
        system_prompt = SYSTEM_PROMPT_IMAGE
    else:
        system_prompt = SYSTEM_PROMPT_IMAGE_WOGUIDE
    for i, sample in enumerate(samples):
        # The limit counts catalog positions, including already-processed ones.
        if limit is not None and i >= limit:
            break
        oid = sample["oid"]
        if oid in processed_ids:
            continue
        label = sample["label"]
        if modality == "image":
            response = classify_image(client, sample["image_path"], model, system_prompt, max_completion_tokens)
        else:
            lightcurve_text = format_lightcurve(sample["csv_path"])
            user_prompt = f"Multi-band light curve data:\n\n{lightcurve_text}\n\nBased on the physical processes described, classify this source. Respond with JSON format."
            response = classify_text(client, user_prompt, model, max_completion_tokens)
        content = response.choices[0].message.content
        pred = parse_prediction(content)
        answer = canonicalize_label(pred.get("answer", ""))
        is_correct = answer == label
        total += 1
        correct += int(is_correct)
        results.append({
            "oid": oid,
            "label": label,
            "prediction": pred,
            "correct": int(is_correct),
            "raw_response": response.model_dump(),
        })
        print(f"{oid}: pred={answer} label={label} {'✓' if is_correct else '✗'}")
        # Checkpoint after every sample so --resume can recover from a crash.
        with out_path.open("w") as f:
            json.dump(results, f, indent=2)
    if total > 0:
        print(f"Accuracy on {total} checked: {correct}/{total} = {correct/total:.2%}")
    print(f"Saved predictions to {out_path}")
    return out_path
# =========================
# ARGPARSE
# =========================
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the Task 4 classification run."""
    cli = argparse.ArgumentParser(description="Task4: Light Curve Classification")
    cli.add_argument("--model", default="gpt-4o")
    cli.add_argument("--modality", choices=["image", "text"], default="image")
    cli.add_argument("--prompt-type", choices=["guided", "woguide"], default="guided")
    cli.add_argument("--limit", type=int, default=None)
    cli.add_argument("--results-dir", type=pathlib.Path, default=pathlib.Path("./results"))
    cli.add_argument("--max-completion-tokens", type=int, default=16384)
    cli.add_argument("--resume", action="store_true")
    cli.add_argument("--data-dir", type=pathlib.Path, default=DATA_DIR)
    return cli.parse_args()
if __name__ == "__main__":
    # CLI entry point: forward parsed arguments straight into the pipeline.
    args = parse_args()
    run(
        model=args.model,
        limit=args.limit,
        results_dir=args.results_dir,
        modality=args.modality,
        prompt_type=args.prompt_type,
        max_completion_tokens=args.max_completion_tokens,
        resume=args.resume,
        data_dir=args.data_dir,
    )