"""
Task 3: SED Classification (Type-1 AGN, Type-2 AGN, or Galaxy)

Classifies astronomical sources from SED plots or text-based magnitude data.
Supports both image-based and text-based modalities, with/without redshift.
"""
|
|
import argparse
import base64
import csv
import json
import math
import os
import pathlib
import re
import time
from typing import Optional

from dotenv import load_dotenv

load_dotenv(override=True)

from openai import OpenAI
|
|
|
|
| |
| |
| |
|
|
| DATA_DIR = pathlib.Path(__file__).resolve().parent.parent.parent / "data" / "Task3_SED" |
|
|
| |
| MAG_COLUMNS = [ |
| ("HSC-g", "HSC_G_MAG", "HSC_G_MAG_ERR", 0.48), |
| ("HSC-r", "HSC_R_MAG", "HSC_R_MAG_ERR", 0.62), |
| ("HSC-i", "HSC_I_MAG", "HSC_I_MAG_ERR", 0.77), |
| ("HSC-z", "HSC_Z_MAG", "HSC_Z_MAG_ERR", 0.91), |
| ("HSC-Y", "HSC_Y_MAG", "HSC_Y_MAG_ERR", 1.00), |
| ("Euclid-Y", "EUCLID_Y_MAG", "EUCLID_Y_MAG_ERR", 1.08), |
| ("Euclid-J", "EUCLID_J_MAG", "EUCLID_J_MAG_ERR", 1.25), |
| ("Euclid-H", "EUCLID_H_MAG", "EUCLID_H_MAG_ERR", 1.65), |
| ("AKARI-N2", "AKARI_N2_MAG", "AKARI_N2_MAG_ERR", 2.4), |
| ("AKARI-N3", "AKARI_N3_MAG", "AKARI_N3_MAG_ERR", 3.2), |
| ("AKARI-N4", "AKARI_N4_MAG", "AKARI_N4_MAG_ERR", 4.1), |
| ("AKARI-S7", "AKARI_S7_MAG", "AKARI_S7_MAG_ERR", 7.0), |
| ("AKARI-S9W", "AKARI_S9W_MAG", "AKARI_S9W_MAG_ERR", 9.0), |
| ("AKARI-S11", "AKARI_S11_MAG", "AKARI_S11_MAG_ERR", 11.0), |
| ("AKARI-L15", "AKARI_L15_MAG", "AKARI_L15_MAG_ERR", 15.0), |
| ("AKARI-L18", "AKARI_L18_MAG", "AKARI_L18_MAG_ERR", 18.0), |
| ("AKARI-L24", "AKARI_L24_MAG", "AKARI_L24_MAG_ERR", 24.0), |
| ("WISE-W1", "WISE_W1_MAG", "WISE_W1_MAG_ERR", 3.4), |
| ("WISE-W2", "WISE_W2_MAG", "WISE_W2_MAG_ERR", 4.6), |
| ("WISE-W3", "WISE_W3_MAG", "WISE_W3_MAG_ERR", 12.0), |
| ("WISE-W4", "WISE_W4_MAG", "WISE_W4_MAG_ERR", 22.0), |
| ] |
|
|
|
|
| |
| |
| |
|
|
| def get_client(model: str) -> OpenAI: |
| """Create OpenAI-compatible client based on model name. |
| |
| Requires environment variables: |
| - OPENAI_API_KEY / OPENAI_BASE_URL for OpenAI/compatible models |
| - CLAUDE_API_KEY for Claude models |
| - GROK_API_KEY for Grok models |
| - QWEN_API_KEY for Qwen models |
| - INTERN_API_KEY for InternVL models |
| """ |
| api_key = os.getenv("OPENAI_API_KEY") |
| base_url = os.getenv("OPENAI_BASE_URL") |
|
|
| if "intern" in model.lower(): |
| api_key = os.getenv("INTERN_API_KEY") |
| base_url = os.getenv("INTERN_BASE_URL") |
| elif "qwen" in model.lower(): |
| api_key = os.getenv("QWEN_API_KEY") |
| base_url = os.getenv("QWEN_BASE_URL") |
| elif "grok" in model.lower(): |
| api_key = os.getenv("GROK_API_KEY") |
| elif "claude" in model.lower(): |
| api_key = os.getenv("CLAUDE_API_KEY") |
|
|
| return OpenAI(api_key=api_key, base_url=base_url) |
|
|
|
|
| |
| |
| |
|
|
| def encode_image(path: pathlib.Path) -> str: |
| with open(path, "rb") as f: |
| return base64.b64encode(f.read()).decode("utf-8") |
|
|
|
|
| def get_image_media_type(path: pathlib.Path) -> str: |
| suffix = path.suffix.lower() |
| if suffix in [".jpg", ".jpeg"]: |
| return "image/jpeg" |
| elif suffix == ".png": |
| return "image/png" |
| return "image/png" |
|
|
|
|
| |
| |
| |
|
|
| SYSTEM_PROMPT_IMAGE = """**Task:** Classify the astronomical source shown in the attached SED plot as a 'Type-1 AGN', 'Type-2 AGN', or 'Galaxy'. |
| |
| **Context:** |
| - The plot shows nu_fnu (y-axis, log scale) vs. Wavelength (x-axis, log scale). |
| - The upper x-axis shows rest-frame wavelength and the lower x-axis shows observed wavelength. |
| - The HSC g, r, i, z, y bands, the Euclid Y, J, H bands, the AKARI N2, N3, N4, S7, S9W, S11, L15, L18, L24 bands, and the WISE W1, W2, W3, W4 bands are marked on the plot (if available). |
| - {REDSHIFT_BLOCK} |
| |
| **Instructions:** |
| Normal galaxies are entirely driven by stellar processes, peaking with starlight in the optical/NIR and star-heated cold dust in the MIR. Type 1 AGNs outshine their host galaxies across the board, dominated by the naked accretion disk in the UV/optical and the intensely heated inner dust torus in the MIR. Type 2 AGNs have their central engines hidden behind thick dust, leaving their UV/optical to appear as normal star-dominated host galaxies, while their MIR reveals the hidden monster via the glowing, re-radiating dust torus. |
| {REDSHIFT_INSTRUCTION} |
| |
| **Output requirements:** |
| - Respond with a JSON object in the following format: {{"answer": "", "reason": ""}} |
| - The "answer" field must be either: Type-1 AGN, Type-2 AGN, or Galaxy |
| - The "reason" field should contain a brief explanation of your classification decision |
| - Do not include any text outside the JSON object |
| """ |
|
|
| SYSTEM_PROMPT_WOGUIDE = """Classify the astronomical source shown in the attached SED plot as a 'Type-1 AGN', 'Type-2 AGN', or 'Galaxy'. |
| |
| - {REDSHIFT_BLOCK} |
| {REDSHIFT_INSTRUCTION} |
| |
| Output requirements: |
| - Respond with a JSON object in the following format: {{"answer": "", "reason": ""}} |
| - The "answer" field must be either: Type-1 AGN, Type-2 AGN, or Galaxy |
| - The "reason" field should contain a brief explanation of your classification decision |
| - Do not include any text outside the JSON object |
| """ |
|
|
|
|
| def format_photometry(row: dict) -> str: |
| lines = [] |
| for band_name, mag_col, err_col, wavelength in MAG_COLUMNS: |
| mag = row.get(mag_col, "") |
| err = row.get(err_col, "") |
| if mag and mag.strip() and mag.strip() not in ["", "nan", "NaN"]: |
| try: |
| mag_val = float(mag) |
| if err and err.strip() and err.strip() not in ["", "nan", "NaN"]: |
| err_val = float(err) |
| lines.append(f" {band_name} ({wavelength:.2f} um): {mag_val:.3f} +/- {err_val:.3f} mag") |
| else: |
| lines.append(f" {band_name} ({wavelength:.2f} um): {mag_val:.3f} mag") |
| except ValueError: |
| pass |
| return "\n".join(lines) if lines else " No photometric data available." |
|
|
|
|
| def build_text_prompt(photometry: str, redshift_mode: str, redshift: float, redshift_err: float) -> str: |
| if redshift_mode == "with": |
| if redshift_err and redshift_err > 0: |
| redshift_block = f"Redshift (z) = {redshift:.4f} +/- {redshift_err:.4f}" |
| else: |
| redshift_block = f"Redshift (z) = {redshift:.4f}" |
| redshift_instruction = "Use the redshift information to interpret rest-frame wavelengths where helpful." |
| else: |
| redshift_block = "Redshift: not provided." |
| redshift_instruction = "Do not assume redshift; base your reasoning on the observer-frame SED only." |
|
|
| return f"""**Task:** Classify the astronomical source based on the following photometric magnitude data as a 'Type-1 AGN', 'Type-2 AGN', or 'Galaxy'. |
| |
| **Photometric Data (observed-frame magnitudes):** |
| {photometry} |
| |
| **Context:** |
| - {redshift_block} |
| - Lower magnitude values indicate brighter flux at that wavelength. |
| |
| **Instructions:** |
| Normal galaxies are entirely driven by stellar processes, peaking with starlight in the optical/NIR and star-heated cold dust in the MIR. Type 1 AGNs outshine their host galaxies across the board, dominated by the naked accretion disk in the UV/optical and the intensely heated inner dust torus in the MIR. Type 2 AGNs have their central engines hidden behind thick dust, leaving their UV/optical to appear as normal star-dominated host galaxies, while their MIR reveals the hidden monster via the glowing, re-radiating dust torus. |
| {redshift_instruction} |
| |
| **Output requirements:** |
| - Respond with a JSON object in the following format: {{"answer": "", "reason": ""}} |
| - The "answer" field must be either: Type-1 AGN, Type-2 AGN, or Galaxy |
| - The "reason" field should contain a brief explanation of your classification decision |
| - Do not include any text outside the JSON object |
| """ |
|
|
|
|
| def build_image_prompt(redshift_mode: str, redshift: float, redshift_err: float, prompt_type: str = "guided") -> str: |
| if redshift_mode == "with": |
| if redshift_err and redshift_err > 0: |
| redshift_block = f"Redshift (z) = {redshift:.4f} ± {redshift_err:.4f}" |
| else: |
| redshift_block = f"Redshift (z) = {redshift:.4f}" |
| redshift_instruction = "\nUse the redshift information to interpret rest-frame wavelengths where helpful." |
| else: |
| redshift_block = "Redshift: not provided." |
| redshift_instruction = "\nDo not assume redshift; base your reasoning on the observer-frame SED only." |
|
|
| template = SYSTEM_PROMPT_IMAGE if prompt_type == "guided" else SYSTEM_PROMPT_WOGUIDE |
| return template.format( |
| REDSHIFT_BLOCK=redshift_block, |
| REDSHIFT_INSTRUCTION=redshift_instruction, |
| ) |
|
|
|
|
| USER_TEXT = "Label this image: Type-1 AGN, Type-2 AGN, or Galaxy. Respond with JSON format." |
|
|
|
|
| |
| |
| |
|
|
| def classify_image(client: OpenAI, image_path: pathlib.Path, system_prompt: str, model: str, max_completion_tokens: int): |
| img_b64 = encode_image(image_path) |
| media_type = get_image_media_type(image_path) |
|
|
| messages = [ |
| {"role": "system", "content": system_prompt}, |
| { |
| "role": "user", |
| "content": [ |
| {"type": "text", "text": USER_TEXT}, |
| { |
| "type": "image_url", |
| "image_url": { |
| "url": f"data:{media_type};base64,{img_b64}", |
| "detail": "high", |
| }, |
| }, |
| ], |
| }, |
| ] |
|
|
| extra = {"enable_thinking": False} if "qwen" in model.lower() else {} |
| for attempt in range(5): |
| try: |
| response = client.chat.completions.create( |
| model=model, |
| messages=messages, |
| temperature=0, |
| max_completion_tokens=max_completion_tokens, |
| extra_body=extra if extra else None, |
| ) |
| return response |
| except Exception as e: |
| if attempt < 4: |
| wait = 2 ** attempt * 5 |
| print(f" Attempt {attempt+1} failed ({e}), retrying in {wait}s...") |
| time.sleep(wait) |
| else: |
| raise |
|
|
|
|
| def classify_text(client: OpenAI, system_prompt: str, model: str, max_completion_tokens: int): |
| messages = [ |
| {"role": "system", "content": system_prompt}, |
| {"role": "user", "content": "Label this source: Type-1 AGN, Type-2 AGN, or Galaxy. Respond with JSON format."}, |
| ] |
|
|
| extra = {"enable_thinking": False} if "qwen" in model.lower() else {} |
| for attempt in range(5): |
| try: |
| response = client.chat.completions.create( |
| model=model, |
| messages=messages, |
| temperature=0, |
| max_completion_tokens=max_completion_tokens, |
| extra_body=extra if extra else None, |
| ) |
| return response |
| except Exception as e: |
| if attempt < 4: |
| wait = 2 ** attempt * 5 |
| print(f" Attempt {attempt+1} failed ({e}), retrying in {wait}s...") |
| time.sleep(wait) |
| else: |
| raise |
|
|
|
|
| |
| |
| |
|
|
| def parse_prediction(raw: str) -> dict: |
| cleaned = re.sub(r"```json\s*", "", raw) |
| cleaned = re.sub(r"```\s*", "", cleaned) |
| cleaned = cleaned.strip() |
| try: |
| return json.loads(cleaned) |
| except json.JSONDecodeError: |
| return {"answer": raw, "reason": ""} |
|
|
|
|
| def canonicalize_label(value: str) -> str: |
| val = (value or "").strip().lower() |
| if "type-1" in val or "type 1" in val: |
| return "Type-1 AGN" |
| if "type-2" in val or "type 2" in val: |
| return "Type-2 AGN" |
| if "galaxy" in val or "sfg" in val or "star" in val: |
| return "Galaxy" |
| return "Unknown" |
|
|
|
|
| |
| |
| |
|
|
| def run( |
| catalog_path: pathlib.Path, |
| images_dir: pathlib.Path, |
| model: str, |
| limit: Optional[int], |
| results_dir: pathlib.Path, |
| redshift_mode: str, |
| modality: str, |
| prompt_type: str, |
| max_completion_tokens: int, |
| resume: bool, |
| ) -> pathlib.Path: |
|
|
| client = get_client(model) |
|
|
| rows = list(csv.DictReader(catalog_path.open())) |
| results_dir.mkdir(parents=True, exist_ok=True) |
|
|
| out_path = results_dir / f"predictions-{modality}-{prompt_type}-{model}-{redshift_mode}.json" |
|
|
| results = [] |
| processed_targets = set() |
| if resume and out_path.exists(): |
| with out_path.open("r") as f: |
| results = json.load(f) |
| processed_targets = {r["targetid"] for r in results if "prediction" in r} |
| print(f"Resuming from {len(results)} existing predictions") |
|
|
| correct = sum(r.get("correct", False) for r in results) |
| total = len(results) |
|
|
| for i, row in enumerate(rows): |
| if limit is not None and i >= limit: |
| break |
|
|
| targetid = str(row.get("targetid", "")).strip() |
| if not targetid: |
| continue |
| if targetid in processed_targets: |
| continue |
|
|
| label = (row.get("class") or "").strip() |
| redshift = float(row.get("z", 0.0) or 0.0) |
| redshift_err = float(row.get("zerr", 0.0) or 0.0) |
|
|
| if modality == "image": |
| matches = list(images_dir.glob(f"*_{targetid}.png")) |
| if not matches: |
| print(f"Warning: image not found for {targetid}") |
| continue |
| image_path = matches[0].resolve() |
| system_prompt = build_image_prompt(redshift_mode, redshift, redshift_err, prompt_type) |
| response = classify_image(client, image_path, system_prompt, model, max_completion_tokens) |
| else: |
| photometry = format_photometry(row) |
| system_prompt = build_text_prompt(photometry, redshift_mode, redshift, redshift_err) |
| response = classify_text(client, system_prompt, model, max_completion_tokens) |
|
|
| content = response.choices[0].message.content |
| pred = parse_prediction(content) |
| answer = canonicalize_label(pred.get("answer", "")) |
| is_correct = answer.lower() == label.lower() |
|
|
| total += 1 |
| correct += int(is_correct) |
|
|
| results.append({ |
| "targetid": targetid, |
| "label": label, |
| "prediction": pred, |
| "correct": int(is_correct), |
| "raw_response": response.model_dump(), |
| }) |
| print(f"{targetid}: pred={answer} label={label} {'✓' if is_correct else '✗'}") |
|
|
| with out_path.open("w") as f: |
| json.dump(results, f, indent=2) |
|
|
| if total > 0: |
| print(f"Accuracy on {total} checked: {correct}/{total} = {correct/total:.2%}") |
|
|
| print(f"Saved predictions to {out_path}") |
| return out_path |
|
|
|
|
| |
| |
| |
|
|
| def parse_args() -> argparse.Namespace: |
| parser = argparse.ArgumentParser(description="Task3: SED Classification") |
| parser.add_argument("--catalog", type=pathlib.Path, default=DATA_DIR / "nirsed_v2_catalog.csv") |
| parser.add_argument("--images-dir", type=pathlib.Path, default=DATA_DIR / "images") |
| parser.add_argument("--model", default="gpt-4o") |
| parser.add_argument("--modality", choices=["image", "text"], default="image") |
| parser.add_argument("--prompt-type", choices=["guided", "woguide"], default="guided") |
| parser.add_argument("--redshift-mode", choices=["with", "without"], default="with") |
| parser.add_argument("--limit", type=int, default=None) |
| parser.add_argument("--results-dir", type=pathlib.Path, default=pathlib.Path("./results")) |
| parser.add_argument("--max-completion-tokens", type=int, default=16384) |
| parser.add_argument("--resume", action="store_true") |
| return parser.parse_args() |
|
|
|
|
| if __name__ == "__main__": |
| args = parse_args() |
| run( |
| catalog_path=args.catalog, |
| images_dir=args.images_dir, |
| model=args.model, |
| limit=args.limit, |
| results_dir=args.results_dir, |
| redshift_mode=args.redshift_mode, |
| modality=args.modality, |
| prompt_type=args.prompt_type, |
| max_completion_tokens=args.max_completion_tokens, |
| resume=args.resume, |
| ) |
|
|