import collections
import io
import json
import logging
import multiprocessing as mp
import os

from capture_metric.capture import CAPTURE
from PIL import Image
from pycocoevalcap.eval import Bleu, Cider, COCOEvalCap, Meteor, Rouge, Spice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocotools.coco import COCO

from lmms_eval.tasks._task_utils.file_utils import generate_submission_file

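# Note (added for clarity): CAPTURE is the caption-evaluation metric from the
# DetailCaps benchmark; the `capture_metric` import above assumes that package
# is installed alongside pycocoevalcap and pycocotools.
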
eval_logger = logging.getLogger("lmms-eval")

dir_name = os.path.dirname(os.path.abspath(__file__))

detailcaps_METRICS = ["CAPTURE", "Bleu_4", "Bleu_3", "Bleu_2", "Bleu_1", "METEOR", "ROUGE_L", "CIDEr"]


def detailcaps_doc_to_visual(doc):
    # Decode the raw image bytes stored in the dataset into an RGB PIL image.
    return [Image.open(io.BytesIO(doc["binary"])).convert("RGB")]


def detailcaps_doc_to_text(doc, lmms_eval_specific_kwargs=None):
    # The captioning prompt is supplied through the task's lmms_eval_specific_kwargs config.
    return lmms_eval_specific_kwargs["prompt"]


def detailcaps_doc_to_target(doc):
    # Each sample ships with three reference captions produced by different models.
    references = [
        doc["GT_Caption_GPT4O"],
        doc["GT_Caption_GPT4V"],
        doc["GT_Caption_Gemini15Pro"],
    ]
    return references


def detailcaps_process_result(doc, result):
    """
    Args:
        doc: an instance of the eval dataset
        result: [pred]
    Returns:
        a dictionary with key: metric name, value: metric value
    """
    pred = result[0]
    image_id = doc["image"]
    data_dict = {"answer": detailcaps_doc_to_target(doc), "pred": pred, "image_id": image_id}
    # Register the same payload under every metric key so each aggregation
    # function receives the complete set of predictions and references.
    return {f"detailcaps_{metric}": data_dict for metric in detailcaps_METRICS}

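# Illustrative sketch (assumption, not part of the original file): the payload
# built by detailcaps_process_result is fanned out once per metric, and the
# harness later hands each aggregation function the full list of payloads.
# With a fabricated doc it would flow through like this:
#
#     doc = {
#         "image": "000001",
#         "binary": b"<jpeg bytes>",
#         "GT_Caption_GPT4O": "a cat on a mat",
#         "GT_Caption_GPT4V": "a cat sitting on a mat",
#         "GT_Caption_Gemini15Pro": "a small cat on a rug",
#     }
#     out = detailcaps_process_result(doc, ["a cat resting on a mat"])
#     # out["detailcaps_CIDEr"] -> {"answer": [...], "pred": "...", "image_id": "000001"}
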
def check_if_context_is_set(expected_context="spawn"):
    # Return True when the default multiprocessing start method matches the
    # expected one (CAPTURE-style scorers may require "spawn").
    default_context_name = mp.get_context().get_start_method()
    is_set_to_expected = default_context_name == expected_context
    return is_set_to_expected

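# Hedged usage sketch (added; not original code): the helper above suggests
# CAPTURE runs worker processes, so a caller might enforce the "spawn" start
# method before scoring. `mp.set_start_method` is the standard-library call;
# the guard itself is only an illustrative assumption.
#
#     if not check_if_context_is_set("spawn"):
#         mp.set_start_method("spawn", force=True)
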
def detailcaps_aggregation_result(results, metric, args=None):
    # Bleu(4) computes BLEU-1 through BLEU-4 in a single pass; the requested
    # n-gram order is selected from the returned list further below. Spice is
    # registered as well so that detailcaps_spice resolves to a scorer.
    scorers = [
        (Bleu(4), "Bleu_1"),
        (Bleu(4), "Bleu_2"),
        (Bleu(4), "Bleu_3"),
        (Bleu(4), "Bleu_4"),
        (Meteor(), "METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
        (Spice(), "SPICE"),
        (CAPTURE(), "CAPTURE"),
    ]
    scorers_dict = {s[1]: s for s in scorers}

    stored_results = []

    # Assemble a COCO-format ground-truth set: one image entry per sample and
    # one annotation entry per reference caption.
    dataset = {"annotations": [], "images": []}
    idx = 0
    for result in results:
        stored_results.append({"image_id": result["image_id"], "caption": result["pred"]})
        for a in result["answer"]:
            dataset["annotations"].append({"image_id": result["image_id"], "caption": a, "id": idx})
            idx += 1
        dataset["images"].append({"id": result["image_id"]})

    coco = COCO()
    coco.dataset = dataset
    coco.createIndex()

    detailcaps_result = coco.loadRes(stored_results)
    detailcaps_eval = COCOEvalCap(coco, detailcaps_result)

    imgIds = detailcaps_eval.params["image_id"]
    gts = {}
    res = {}
    for imgId in imgIds:
        gts[imgId] = detailcaps_eval.coco.imgToAnns[imgId]
        res[imgId] = detailcaps_eval.cocoRes.imgToAnns[imgId]

    eval_logger.info("tokenization...")
    tokenizer = PTBTokenizer()

    if metric == "CAPTURE":
        # CAPTURE consumes raw caption strings grouped by image id, so the
        # annotations are regrouped instead of being run through PTBTokenizer.
        reorg_gts, reorg_res = collections.defaultdict(list), collections.defaultdict(list)
        for _, samples in gts.items():
            for sample in samples:
                reorg_gts[sample["image_id"]].append(sample["caption"])
        for _, samples in res.items():
            for sample in samples:
                reorg_res[sample["image_id"]].append(sample["caption"])
        gts, res = reorg_gts, reorg_res
    else:
        gts = tokenizer.tokenize(gts)
        res = tokenizer.tokenize(res)

    eval_logger.info(f"Computing {metric} scores...")

    score, scores = scorers_dict[metric][0].compute_score(gts, res)

    # Bleu returns a list of BLEU-1..4 scores; select the requested n-gram order.
    if isinstance(score, list):
        n = int(metric.split("_")[-1])
        score = score[n - 1]

    path = generate_submission_file(f"detailcaps_val_{metric}_scores.json", args)
    eval_logger.info("Storing prediction that can be submitted to the server ...")
    with open(path, "w") as f:
        json.dump(stored_results, f, indent=4)
    eval_logger.info(f"Your result has been saved to {path}.")

    return score

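# Minimal end-to-end sketch (assumption, for illustration): scoring a toy
# `results` list directly. In practice lmms-eval assembles `results` from
# detailcaps_process_result outputs before calling the wrappers below.
#
#     toy_results = [
#         {"image_id": 0, "pred": "a dog on grass",
#          "answer": ["a dog on grass", "a small dog outdoors", "a puppy on a lawn"]},
#     ]
#     cider_score = detailcaps_aggregation_result(toy_results, "CIDEr")
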
def detailcaps_bleu4(results, args=None):
    return detailcaps_aggregation_result(results, "Bleu_4", args)


def detailcaps_bleu3(results, args=None):
    return detailcaps_aggregation_result(results, "Bleu_3", args)


def detailcaps_bleu2(results, args=None):
    return detailcaps_aggregation_result(results, "Bleu_2", args)


def detailcaps_bleu1(results, args=None):
    return detailcaps_aggregation_result(results, "Bleu_1", args)


def detailcaps_meteor(results, args=None):
    return detailcaps_aggregation_result(results, "METEOR", args)


def detailcaps_rougel(results, args=None):
    return detailcaps_aggregation_result(results, "ROUGE_L", args)


def detailcaps_cider(results, args=None):
    return detailcaps_aggregation_result(results, "CIDEr", args)


def detailcaps_spice(results, args=None):
    return detailcaps_aggregation_result(results, "SPICE", args)


def detailcaps_capture(results, args=None):
    return detailcaps_aggregation_result(results, "CAPTURE", args)


def detailcaps_test_process_result(doc, result):
    """
    Args:
        doc: an instance of the eval dataset
        result: [pred]
    Returns:
        a dictionary with key: metric name (in this case detailcaps_passthrough), value: metric value
    """
    return {"detailcaps_passthrough": {"pred": result[0], "image_id": doc["image_id"]}}


def detailcaps_test_aggregation_result(results, args=None):
    stored_results = []
    for result in results:
        stored_results.append({"image_id": int(result["image_id"]), "caption": result["pred"]})

    path = generate_submission_file("detailcaps_captions_detailcaps_test_alg_results.json", args)
    eval_logger.info("Storing prediction that can be submitted to the server ...")
    with open(path, "w") as f:
        json.dump(stored_results, f, indent=4)

    eval_logger.info(f"Your test result has been stored in {path}. Make sure you also have the val result stored to submit to the server on https://codalab.lisn.upsaclay.fr/competitions/7404#participate.")