import os
import json
from typing import Dict, List, Tuple, Any, Union
import pandas as pd
import warnings

from vlmeval.dataset.image_base import ImageBaseDataset
from vlmeval.smp import misc, file
from vlmeval.smp.file import get_intermediate_file_path
from vlmeval import utils
from vlmeval.dataset.utils import build_judge


def auxeval(judge_model: Any, line: pd.Series, **kwargs: Any) -> Dict[str, Any]:
    """
    Grade a single prediction with the judge model.

    The judge is asked (via the row's grading query) to emit a JSON object
    with "extract_answer" and "score" keys. Transport and JSON-decode
    failures are retried; a well-formed reply that violates the expected
    schema is treated as a terminal failure.

    Args:
        judge_model: Model exposing ``generate(prompt, **sampling_kwargs)``.
        line: Row holding ``grading_query`` (containing a ``{PREDICTION}``
            placeholder) and ``prediction``.
        **kwargs: Optional ``retry`` (default 10), ``max_tokens`` (256),
            ``temperature`` (0), ``seed`` (42), ``top_p`` (1).

    Returns:
        The judge's dict on success, otherwise a failure record with
        score 0.0.
    """
    failure_result = {"extract_answer": "Failed to parse response", "score": 0.0}
    prompt = line["grading_query"].replace("{PREDICTION}", line["prediction"])

    retry = kwargs.get("retry", 10)
    max_tokens = kwargs.get("max_tokens", 256)
    temperature = kwargs.get("temperature", 0)
    seed = kwargs.get("seed", 42)
    top_p = kwargs.get("top_p", 1)

    for _ in range(retry):
        # Keep the try body minimal: only the API call and JSON decode can
        # legitimately fail transiently and deserve another attempt.
        try:
            response = judge_model.generate(
                prompt,
                temperature=temperature,
                max_tokens=max_tokens,
                seed=seed,
                top_p=top_p,
            )
            content = json.loads(response)
        except Exception:
            continue
        if not isinstance(content, dict):
            return failure_result
        if "score" not in content or "extract_answer" not in content:
            return failure_result
        return content

    return failure_result


def qid2category(mode: str) -> Tuple[Dict[int, str], str]:
    """
    Map question IDs to their categories based on the evaluation mode.

    Args:
        mode: Either "descriptive" or "reasoning".

    Returns:
        Tuple of (qid -> category mapping, name of the column holding the
        id in the result rows).

    Raises:
        ValueError: If the mode is not recognized.
    """
    if mode == "descriptive":
        index_col = "qid"
        return {
            1: "Information Extraction",
            2: "Information Extraction",
            3: "Information Extraction",
            4: "Information Extraction",
            5: "Information Extraction",
            6: "Information Extraction",
            7: "Information Extraction",
            8: "Enumeration",
            9: "Enumeration",
            10: "Counting",
            11: "Pattern Recognition",
            12: "Counting",
            13: "Enumeration",
            14: "Enumeration",
            15: "Enumeration",
            16: "Pattern Recognition",
            17: "Compositionality",
            18: "Pattern Recognition",
            19: "Counting",
        }, index_col
    elif mode == "reasoning":
        index_col = "inst_category"
        return {
            1: "Text-in-Chart",
            2: "Text-in-General",
            3: "Number-in-Chart",
            4: "Number-in-General",
        }, index_col
    else:
        raise ValueError(f"Invalid mode: {mode}")


class CharXiv(ImageBaseDataset):
    """CharXiv chart-understanding benchmark (descriptive / reasoning splits)."""

    TYPE = "VQA"
    DATASET_URL = {
        "CharXiv_descriptive_val": "http://opencompass.openxlab.space/utils/VLMEval/CharXiv_descriptive_val.tsv",
        "CharXiv_reasoning_val": "http://opencompass.openxlab.space/utils/VLMEval/CharXiv_reasoning_val.tsv",
    }
    DATASET_MD5 = {
        "CharXiv_descriptive_val": "e165037032f169a59dd09ea5d7ad3073",
        "CharXiv_reasoning_val": "98eeff269b40726982627b19338ccd45",
    }

    def build_prompt(self, line: Union[int, pd.Series]) -> List[Dict[str, str]]:
        """
        Build a prompt for the model from a data line.

        Args:
            line: Either an index into the dataset or a pandas Series.

        Returns:
            List of message dictionaries containing the image and question.
        """
        if isinstance(line, int):
            line = self.data.iloc[line]

        if self.meta_only:
            tgt_path = misc.toliststr(line["image"])
        else:
            tgt_path = self.dump_image(line)

        messages = [{"type": "image", "value": tgt_path[0]}]
        messages.append({"type": "text", "value": line["question"]})
        return messages

    def get_scores(self, result_file: str) -> pd.DataFrame:
        """
        Calculate scores by category from evaluation results.

        Args:
            result_file: Path to the file containing evaluation results.

        Returns:
            DataFrame with one column per category plus "Overall".

        Raises:
            ValueError: If the dataset name encodes no known mode.
        """
        data = file.load(result_file)

        if "descriptive" in self.dataset_name:
            mode = "descriptive"
        elif "reasoning" in self.dataset_name:
            mode = "reasoning"
        else:
            raise ValueError(f"Invalid dataset name: {self.dataset_name}")

        category_map, index_col = qid2category(mode)

        # Group per-row judge scores by question category.
        scores_by_category: Dict[str, List[float]] = {}
        for _, row in data.iterrows():
            category = category_map[row[index_col]]
            scores_by_category.setdefault(category, []).append(row["score"])

        # Mean score per category; "Overall" is the mean over all rows.
        result = {
            category: [sum(scores) / len(scores)]
            for category, scores in scores_by_category.items()
        }
        result["Overall"] = [
            sum(sum(scores) for scores in scores_by_category.values()) / len(data)
        ]

        return pd.DataFrame(result)

    def evaluate(self, eval_file: str, **judge_kwargs: Any) -> pd.DataFrame:
        """
        Grade model predictions with an LLM judge and return category scores.

        Per-row judge outputs are cached in a pickle keyed by row position so
        an interrupted run resumes where it stopped; the graded rows and the
        accuracy table are dumped next to ``eval_file``.

        Args:
            eval_file: Path to the file containing model predictions.
            **judge_kwargs: Extra arguments for the judge model
                (``model``, ``nproc``, sampling settings, ...).

        Returns:
            DataFrame with evaluation scores by category.
        """
        # LOCAL_LLM overrides the requested judge model name.
        if "LOCAL_LLM" in os.environ:
            judge_model = os.path.basename(os.environ["LOCAL_LLM"])
        else:
            judge_model = judge_kwargs.get("model", "gpt-4o-mini")

        if judge_model != "gpt-4o-mini":
            warnings.warn(
                f"The judge_model '{judge_model}' is not gpt-4o-mini. Evaluation results may not be accurate."
            )

        judge_model = build_judge(model=judge_model, **judge_kwargs)
        judge_model_name = judge_model.model

        # Derived artifact paths.
        result_file = get_intermediate_file_path(eval_file, f"_{judge_model_name}")
        temp_result_file = get_intermediate_file_path(eval_file, f"_{judge_model_name}", "pkl")
        score_file = get_intermediate_file_path(result_file, "_acc", "csv")

        # Reuse a finished grading run if present.
        if os.path.exists(result_file):
            score = self.get_scores(result_file)
            file.dump(score, score_file)
            return score

        data = file.load(eval_file)
        if "score" not in data.columns:
            data["score"] = 0
        if "extract_answer" not in data.columns:
            data["extract_answer"] = ""

        # Resume from cached per-row judge outputs (keys are row positions).
        processed_results = {}
        if os.path.exists(temp_result_file):
            processed_results = file.load(temp_result_file)

        # Compute the unprocessed positions once; reuse for the work items.
        indices = [i for i in range(len(data)) if i not in processed_results]
        tups = [(judge_model, data.iloc[i]) for i in indices]

        nproc = judge_kwargs.pop("nproc", 4)
        if indices:
            utils.track_progress_rich(
                auxeval,
                tups,
                nproc=nproc,
                chunksize=nproc,
                keys=indices,
                save=temp_result_file,
                **judge_kwargs,
            )
            processed_results = file.load(temp_result_file)

        # Write judge outputs back positionally: the cache is keyed by row
        # position, which need not match the DataFrame's index labels.
        data["score"] = [processed_results[i]["score"] for i in range(len(data))]
        data["extract_answer"] = [
            processed_results[i]["extract_answer"] for i in range(len(data))
        ]

        # Persist graded rows, then compute and persist the accuracy table.
        file.dump(data, result_file)
        score = self.get_scores(result_file)
        file.dump(score, score_file)
        return score
import os
import re
import tempfile
from functools import partial

import pandas as pd

from .image_base import ImageBaseDataset
from .utils import build_judge, DEBUG_MESSAGE, Spatial457_utils
from ..smp import *
from ..utils import track_progress_rich


class Spatial457(ImageBaseDataset):
    """Spatial457 6D spatial-reasoning benchmark."""

    TYPE = "VQA"
    # When ROBUST is True, if the model does not follow the JSON format, the
    # whole response is treated as the answer (instead of guessing randomly).
    ROBUST = True

    DATASET_URL = {
        "Spatial457": "http://opencompass.openxlab.space/utils/VLMEval/Spatial457.tsv",
    }

    DATASET_MD5 = {
        'Spatial457': "1f24f5a7b2cadc3d33a8a66ecf92ca68"
    }

    # Difficulty levels used both as per-row categories and as score buckets.
    _LEVELS = (
        "L1_single",
        "L2_objects",
        "L3_2d_spatial",
        "L4_occ",
        "L4_pose",
        "L5_6d_spatial",
        "L5_collision",
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Helper with answer vocabulary / correctness checks for this dataset.
        self.dataset_utils = Spatial457_utils()

    def evaluate(self, eval_file, **judge_kwargs):
        """
        Score predictions and dump per-level accuracies as JSON.

        Each prediction is expected to contain a JSON-ish payload with
        'Reasoning' and 'Answer' fields; both are extracted with regexes
        tolerant of single- or double-quoted keys.

        Args:
            eval_file: Path to the file containing model predictions.
            **judge_kwargs: Unused; kept for interface compatibility.

        Returns:
            Dict with overall and per-level counts and scores.
        """
        data = load(eval_file)
        data['prediction'] = [str(x) for x in data['prediction']]
        lines = [data.iloc[i] for i in range(len(data))]

        all_results = {
            "correct": 0,
            "total": 0,
            "answers": [],
            "format_error": 0,
        }
        for lvl in self._LEVELS:
            all_results[lvl] = 0
            all_results[f"{lvl}_correct"] = 0

        for line in tqdm(lines):
            index = int(line["index"])
            answers = str(line["answer"])
            level = line["category"]
            objects = []

            # Extract the answer; fall back per the ROBUST policy when the
            # model ignored the requested JSON template.
            pred_match = (
                re.search(r"Answer': '(.*?)'", line["prediction"])
                or re.search(r'Answer": "(.*?)"', line["prediction"])
                or re.search(r"Answer': (\d)", line["prediction"])
            )
            if pred_match:
                pred = pred_match.group(1)
            else:
                if self.ROBUST:
                    pred = line['prediction']
                else:
                    pred = self.dataset_utils.get_random_answer(answers)
                all_results["format_error"] += 1

            reasoning_match = (
                re.search(r"Reasoning': '(.*?)'", line["prediction"])
                or re.search(r'Reasoning": "(.*?)"', line["prediction"])
            )
            if reasoning_match:
                reasoning = reasoning_match.group(1)
            elif self.ROBUST:
                reasoning = "Format Error. All of the resposne as the answer."
            else:
                reasoning = "Format Error. Guess a random answer."

            correct = self.dataset_utils.is_correct(answers, pred)

            all_results["answers"].append(
                {
                    "index": index,
                    "correct": correct,
                    "answers": answers,
                    "predict": pred,
                    "reasoning": reasoning,
                    "objects": objects,
                }
            )

            all_results["total"] += 1
            all_results[f"{level}"] += 1
            if correct:
                all_results["correct"] += 1
                all_results[f"{level}_correct"] += 1

        # Guard against empty inputs instead of raising ZeroDivisionError.
        total = all_results["total"]
        all_results["score"] = all_results["correct"] / total if total > 0 else 0

        for lvl in self._LEVELS:
            all_results[f"{lvl}_score"] = (
                all_results[f"{lvl}_correct"] / all_results[lvl] if all_results[lvl] > 0 else 0
            )

        score_pth = get_intermediate_file_path(eval_file, "_score", "json")

        dump(all_results, score_pth)
        return all_results

    def build_prompt(self, line):
        """Wrap the base image+question prompt with level-specific instructions."""
        msgs = super().build_prompt(line)

        set_type = line["category"]

        instruction_1, instruction_2 = self.build_subtask_instruction(set_type)

        # System-style preamble goes first; answer-format instructions last.
        msgs.insert(0, {"type": "text", "value": instruction_1})
        msgs.append({"type": "text", "value": instruction_2})

        return msgs

    def build_subtask_instruction(self, level):
        """
        Return the (preamble, answer-format) instruction pair for a level.

        Args:
            level: One of the dataset's category names (e.g. "L1_single");
                unknown levels yield an empty preamble.

        Returns:
            Tuple of two instruction strings.
        """
        task_map = {
            "L1_single": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of the objects, "
                "and then determine the answer to the question.\n"
            ),
            "L2_objects": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects, "
                "and then determine the answer to the question.\n"
            ),
            "L3_2d_spatial": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their spatial relationship from 2D "
                "projected camera view, and then determine the answer to the question.\n"
            ),
            "L4_occ": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their occlusion relationships, and "
                "then determine the answer to the question.\n"
            ),
            "L4_pose": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their facing direction in 3D space "
                "from the camera view, and then determine the answer to the question.\n"
            ),
            "L5_6d_spatial": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their spatial relationship from "
                "objects’ perspective in 3D space, and then determine the answer to the question.\n"
            ),
            "L5_collision": (
                "You are an intelligent chatbot designed to answer questions based on an image. Your task is to "
                "analyze the images, identify attributes of multiple objects and their potential collision given the "
                "assumption of moving direction in 3D space, and then determine the answer to the question.\n"
            ),
        }

        instruction_1 = task_map.get(level, "")

        instruction_2 = (
            "First, you should identify the related objects refered in the questions, including their shape, "
            "color, size; then add a brief reasoning process about the questions. Each object in the image has a "
            "shape (e.g., 'airliner'), a size (only can be 'small' or 'large'), a color (e.g. 'blue'). The size of "
            "the object is either 'small' or 'large'. The color of the object is one of the following: 'gray', "
            "'blue', 'purple', 'brown', 'green', 'cyan', 'red', 'yellow'. The direction of the object is one of the "
            "following: 'left', 'right', 'front', 'back'.\n\n"
            "Second, give the answer based on the reasoning process. The answer should only be (1) a phrase chosen "
            "from the following options: {}, or (2) an integer [0-10] when asked for 'How many' or 'What is the "
            "number of', or (3) 'Yes' or 'No' when asked for 'Is there'. If you think there are no possible answers "
            "or the question is not clear, choose the best answer that fits the question.\n\n"
        ).format(self.dataset_utils.all_answers())

        instruction_2 += (
            "Write your response into this json template: " "{'Reasoning': '', 'Answer': ''}"
        )

        return instruction_1, instruction_2
The direction of the object is one of the " + "following: 'left', 'right', 'front', 'back'.\n\n" + "Second, give the answer based on the reasoning process. The answer should only be (1) a phrase chosen " + "from the following options: {}, or (2) an integer [0-10] when asked for 'How many' or 'What is the " + "number of', or (3) 'Yes' or 'No' when asked for 'Is there'. If you think there are no possible answers " + "or the question is not clear, choose the best answer that fits the question.\n\n" + ).format(self.dataset_utils.all_answers()) + + instruction_2 += ( + "Write your response into this json template: " "{'Reasoning': '', 'Answer': ''}" + ) + + return instruction_1, instruction_2 diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/ayavision.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ayavision.py new file mode 100644 index 0000000000000000000000000000000000000000..f1e317693204e3f3ae6ea39fb5d8826a06335cf3 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ayavision.py @@ -0,0 +1,50 @@ +import pandas as pd +from ...smp import * + + +FAIL_MSG = "Failed to obtain answer via API." + + +def build_prompt_ayavision(line): + question = line["question"] + prediction = str(line["prediction"]) + answer = str(line["answer"]) + + tmpl = ( + "You are an expert evaluator. Your task is to determine if the predicted answer " + "is a correct response to the given question, using the ground truth answer as a reference. " + "The predicted answer does not need to be a verbatim match of the ground truth, " + "but it must be semantically equivalent and accurately answer the question.\n" + "Respond with '[[CORRECT]]' if the prediction is correct, and '[[WRONG]]' if it is incorrect. " + "Do not provide any explanation.\n\n" + "Question: {question}\n" + "Ground Truth Answer: {answer}\n" + "Predicted Answer: {prediction}\n\n" + "Is the prediction correct? 
" + ) + return tmpl.format(question=question, answer=answer, prediction=prediction) + + +def AyaVision_auxeval(model, line): + prompt = build_prompt_ayavision(line) + log = "" + retry = 5 + + for i in range(retry): + res = model.generate(prompt, temperature=i * 0.5) + + if FAIL_MSG in res: + log += f"Try {i}: output is {res}, failed to parse.\\n" + elif "[[CORRECT]]" in res: + log += "Succeed" + hit = 1 + return dict(log=log, res=res, hit=hit) + elif "[[WRONG]]" in res: + log += "Succeed" + hit = 0 + return dict(log=log, res=res, hit=hit) + else: + log += f"Try {i}: output is {res}, failed to parse.\\n" + + log += "All 5 retries failed.\\n" + return dict(log=log, res="", hit=0) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__init__.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/__init__.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb4a878a2867da6ccb8ee36166c3ba56e621c47b Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/__init__.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/mp_util.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/mp_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6fb83d2eee2b4979521736126a7845be4719d02 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/__pycache__/mp_util.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/global_config.cpython-310.pyc 
b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/global_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d87ef2006d9b0369adf086ad69785e4c212179d Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_configs/__pycache__/global_config.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_req.txt b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_req.txt new file mode 100644 index 0000000000000000000000000000000000000000..c754c049d3e33936b75183ce6ae846392756fe12 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/eval_req.txt @@ -0,0 +1,5 @@ +pdf2image +colormath +squarify +matplotlib_venn +Pillow diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__init__.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_utils.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac417536c7c1d334493c10e07bc05213822e946b Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/color_utils.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_and_color.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_and_color.py new file mode 100644 index 0000000000000000000000000000000000000000..e01ff1b30cf7a99a3e9c6cbffb8a552a77c983f2 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_and_color.py @@ -0,0 +1,971 @@ +# flake8: noqa +import inspect +from matplotlib.patches import Ellipse 
def convert_color_to_hex(color):
    """
    Convert a color given as a name, hex string, or RGB/RGBA tuple to
    uppercase hex notation.

    Args:
        color: A matplotlib color: named string ("red"), hex string
            ("#ff0000" / "#ff0000ff"), or a 3- or 4-element RGB(A) sequence.

    Returns:
        The uppercase hex string, or None if the value cannot be converted
        (the failure is printed rather than raised so that color logging
        never aborts a plot).
    """
    try:
        if isinstance(color, str):
            # Already hex ("#RRGGBB" or "#RRGGBBAA")? Just normalize case.
            if color.startswith('#') and (len(color) == 7 or len(color) == 9):
                return color.upper()
            # Otherwise resolve the color name via RGBA.
            return mcolors.to_hex(mcolors.to_rgba(color)).upper()
        # Accept both RGB (3) and RGBA (4) sequences; to_hex handles either.
        elif isinstance(color, (list, tuple)) and len(color) in (3, 4):
            return mcolors.to_hex(color).upper()
        else:
            raise ValueError("Unsupported color format")
    except ValueError as e:
        print(color)
        print("Error converting color:", e)
        return None
verticalalignment=verticalalignment, + ax=ax, + clip_on=clip_on + ) + + for item in result.values(): + color = convert_color_to_hex(item.get_color()) + drawed_colors.append(func_name + "--" + color) + + in_decorator = False + else: + return func( + G, + pos, + labels=labels, + font_size=font_size, + font_color=font_color, + font_family=font_family, + font_weight=font_weight, + alpha=alpha, + bbox=bbox, + horizontalalignment=horizontalalignment, + verticalalignment=verticalalignment, + ax=ax, + clip_on=clip_on + ) + return result + wrapper.__name__ = func.__name__ + return wrapper + + +def log_function_specific_for_draw_networkx_edges(func): + def wrapper( + G, + pos, + edgelist=None, + width=1.0, + edge_color="k", + style="solid", + alpha=None, + arrowstyle=None, + arrowsize=10, + edge_cmap=None, + edge_vmin=None, + edge_vmax=None, + ax=None, + arrows=None, + label=None, + node_size=300, + nodelist=None, + node_shape="o", + connectionstyle="arc3", + min_source_margin=0, + min_target_margin=0, + ): + global drawed_colors + global in_decorator + + if not in_decorator: + in_decorator = True + + func_name = inspect.getfile(func) + "/" + func.__name__ + + result = func( + G, + pos, + edgelist=edgelist, + width=width, + edge_color=edge_color, + style=style, + alpha=alpha, + arrowstyle=arrowstyle, + arrowsize=arrowsize, + edge_cmap=edge_cmap, + edge_vmin=edge_vmin, + edge_vmax=edge_vmax, + ax=ax, + arrows=arrows, + label=label, + node_size=node_size, + nodelist=nodelist, + node_shape=node_shape, + connectionstyle=connectionstyle, + min_source_margin=min_source_margin, + min_target_margin=min_target_margin + ) + + for item in result.get_edgecolors().tolist(): + color = convert_color_to_hex(item) + drawed_colors.append(func_name + "--" + color) + + in_decorator = False + else: + return func( + G, + pos, + edgelist=edgelist, + width=width, + edge_color=edge_color, + style=style, + alpha=alpha, + arrowstyle=arrowstyle, + arrowsize=arrowsize, + edge_cmap=edge_cmap, + 
edge_vmin=edge_vmin, + edge_vmax=edge_vmax, + ax=ax, + arrows=arrows, + label=label, + node_size=node_size, + nodelist=nodelist, + node_shape=node_shape, + connectionstyle=connectionstyle, + min_source_margin=min_source_margin, + min_target_margin=min_target_margin + ) + return result + wrapper.__name__ = func.__name__ + return wrapper + + +def log_function_specific_for_draw_networkx_nodes(func): + def wrapper( + G, + pos, + nodelist=None, + node_size=300, + node_color="#1f78b4", + node_shape="o", + alpha=None, + cmap=None, + vmin=None, + vmax=None, + ax=None, + linewidths=None, + edgecolors=None, + label=None, + margins=None, + ): + global drawed_colors + global in_decorator + + if not in_decorator: + in_decorator = True + + func_name = inspect.getfile(func) + "/" + func.__name__ + + result = func( + G, + pos, + nodelist=nodelist, + node_size=node_size, + node_color=node_color, + node_shape=node_shape, + alpha=alpha, + cmap=cmap, + vmin=vmin, + vmax=vmax, + ax=ax, + linewidths=linewidths, + edgecolors=edgecolors, + label=label, + margins=margins + ) + + for item in result.get_facecolors().tolist(): + color = convert_color_to_hex(item) + drawed_colors.append(func_name + "--" + color) + + in_decorator = False + else: + return func( + G, + pos, + nodelist=nodelist, + node_size=node_size, + node_color=node_color, + node_shape=node_shape, + alpha=alpha, + cmap=cmap, + vmin=vmin, + vmax=vmax, + ax=ax, + linewidths=linewidths, + edgecolors=edgecolors, + label=label, + margins=margins + ) + return result + wrapper.__name__ = func.__name__ + return wrapper + + +def log_function_for_3d(func): + def wrapper(*args, **kwargs): + global drawed_colors + global in_decorator + + if not in_decorator: + in_decorator = True + + func_name = inspect.getfile(func) + "/" + func.__name__ + + result = func(*args, **kwargs) + + if func.__name__ == "scatter": + # check whether cmap is used + if "cmap" in kwargs and kwargs["cmap"] is not None: + print("cmap is used", kwargs["cmap"]) + 
drawed_colors.append(func_name + "--" + kwargs["cmap"]) + else: + for item in result.get_facecolors().tolist(): + color = convert_color_to_hex(item) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "plot": + for line in result: + color = convert_color_to_hex(line.get_color()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "plot_surface": + if "cmap" in kwargs and kwargs["cmap"] is not None: + print("cmap is used", kwargs["cmap"]) + drawed_colors.append(func_name + "--" + kwargs["cmap"]) + else: + colors = result.get_facecolors().tolist() + drawed_colors.append( + func_name + + "--" + + convert_color_to_hex( + colors[0])) + elif func.__name__ == "bar3d": + colors = result.get_facecolors().tolist() + drawed_colors.append( + func_name + + "--" + + convert_color_to_hex( + colors[0])) + elif func.__name__ == "bar": + for item in result: + color = convert_color_to_hex(item.get_facecolor()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "add_collection3d": + colors = result.get_facecolors().tolist() + for color in colors: + drawed_colors.append( + func_name + "--" + convert_color_to_hex(color)) + + in_decorator = False + else: + return func(*args, **kwargs) + return result + + wrapper.__name__ = func.__name__ + return wrapper + + +def log_function(func): + def wrapper(*args, **kwargs): + global drawed_colors + global in_decorator + + if not in_decorator: + in_decorator = True + + func_name = inspect.getfile(func) + "/" + func.__name__ + + result = func(*args, **kwargs) + + if func.__name__ == "_makeline": + color = convert_color_to_hex(result[1]["color"]) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "axhline": + color = convert_color_to_hex(result.get_color()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "axvline": + color = convert_color_to_hex(result.get_color()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == 
"_fill_between_x_or_y": + color = convert_color_to_hex(list(result.get_facecolors()[0])) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "bar": + for item in result: + color = convert_color_to_hex( + list(item._original_facecolor)) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "scatter" and not isinstance(args[0], PolarAxes): + # check whether cmap is used + if "cmap" in kwargs and kwargs["cmap"] is not None: + print("cmap is used", kwargs["cmap"]) + drawed_colors.append(func_name + "--" + kwargs["cmap"]) + else: + color = convert_color_to_hex( + list(result.get_facecolor()[0])) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "pie": + for item in result[0]: + color = convert_color_to_hex(item.get_facecolor()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "axvspan": + color = convert_color_to_hex(result.get_facecolor()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "axhspan": + color = convert_color_to_hex(result.get_facecolor()) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "hlines": + for item in result.get_edgecolors(): + color = convert_color_to_hex(list(item)) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "vlines": + for item in result.get_edgecolors(): + color = convert_color_to_hex(list(item)) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "boxplot": + for item in result["boxes"]: + if isinstance(item, matplotlib.patches.PathPatch): + color = convert_color_to_hex( + list(item.get_facecolor())) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "violinplot": + for item in result["bodies"]: + color = convert_color_to_hex(list(item.get_facecolor()[0])) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "hist": + tops, bins, patches = result + if not isinstance(patches, matplotlib.cbook.silent_list): + for item 
in patches: + color = convert_color_to_hex( + list(item.get_facecolor())) + drawed_colors.append(func_name + "--" + color) + else: + for container in patches: + for item in container: + color = convert_color_to_hex( + list(item.get_facecolor())) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "quiver": + for item in result.get_facecolors().tolist(): + color = convert_color_to_hex(item) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "plot" and len(args) > 0 and isinstance(args[0], PolarAxes): + lines = result + for line in lines: + color = convert_color_to_hex(line.get_color()) + drawed_colors.append(func_name + "_polar" + "--" + color) + elif func.__name__ == "scatter" and isinstance(args[0], PolarAxes): + # check whether cmap is used + if "cmap" in kwargs and kwargs["cmap"] is not None: + print("cmap is used", kwargs["cmap"]) + drawed_colors.append(func_name + "--" + kwargs["cmap"]) + else: + color = convert_color_to_hex( + list(result.get_facecolor()[0])) + drawed_colors.append(func_name + "_polar" + "--" + color) + elif func.__name__ == "plot" and "squarify" in func_name: + # get ax + ax = result + # get container + containers = ax.containers + for container in containers: + for item in container: + color = convert_color_to_hex( + list(item.get_facecolor())) + drawed_colors.append( + func_name + "_squarify" + "--" + color) + elif func.__name__ == "imshow": + colormap = result.get_cmap().name + drawed_colors.append(func_name + "--" + colormap) + elif func.__name__ == "pcolor": + colormap = result.get_cmap().name + drawed_colors.append(func_name + "--" + colormap) + elif func.__name__ == "contour": + colormap = result.get_cmap().name + drawed_colors.append(func_name + "--" + colormap) + elif func.__name__ == "contourf": + colormap = result.get_cmap().name + drawed_colors.append(func_name + "--" + colormap) + elif func.__name__ == "fill": + patches = result + for patch in patches: + color = 
convert_color_to_hex(list(patch.get_facecolor())) + drawed_colors.append(func_name + "--" + color) + elif func.__name__ == "__init__" and isinstance(args[0], NonUniformImage): + colormap = args[0].get_cmap().name + drawed_colors.append(func_name + "--" + colormap) + elif func.__name__ == "broken_barh": + colors = result.get_facecolors().tolist() + for color in colors: + drawed_colors.append( + func_name + "--" + convert_color_to_hex(color)) + elif func.__name__ == "__init__" and isinstance(args[0], Ellipse): + color = convert_color_to_hex(args[0].get_facecolor()) + drawed_colors.append(func_name + "--" + color) + + in_decorator = False + else: + return func(*args, **kwargs) + return result + wrapper.__name__ = func.__name__ + return wrapper + + +_process_plot_var_args._makeline = log_function( + _process_plot_var_args._makeline) +Axes.bar = log_function(Axes.bar) +Axes.scatter = log_function(Axes.scatter) +Axes.axhline = log_function(Axes.axhline) +Axes.axvline = log_function(Axes.axvline) +Axes._fill_between_x_or_y = log_function(Axes._fill_between_x_or_y) +Axes.pie = log_function(Axes.pie) +Axes.axvspan = log_function(Axes.axvspan) +Axes.axhspan = log_function(Axes.axhspan) +Axes.hlines = log_function(Axes.hlines) +Axes.vlines = log_function(Axes.vlines) +Axes.boxplot = log_function(Axes.boxplot) +Axes.violinplot = log_function(Axes.violinplot) +Axes.hist = log_function(Axes.hist) +Axes.plot = log_function(Axes.plot) +Axes.quiver = log_function(Axes.quiver) +Axes.imshow = log_function(Axes.imshow) +Axes.pcolor = log_function(Axes.pcolor) +Axes.contour = log_function(Axes.contour) +Axes.contourf = log_function(Axes.contourf) +Axes.fill = log_function(Axes.fill) +NonUniformImage.__init__ = log_function(NonUniformImage.__init__) +Ellipse.__init__ = log_function(Ellipse.__init__) +Axes.broken_barh = log_function(Axes.broken_barh) + +nx_pylab.draw_networkx_nodes = log_function_specific_for_draw_networkx_nodes( + nx_pylab.draw_networkx_nodes) 
+nx_pylab.draw_networkx_edges = log_function_specific_for_draw_networkx_edges( + nx_pylab.draw_networkx_edges) +nx_pylab.draw_networkx_labels = log_function_specific_for_draw_networkx_labels( + nx_pylab.draw_networkx_labels) + + +squarify.plot = log_function(squarify.plot) + +Axes3D.scatter = log_function_for_3d(Axes3D.scatter) +Axes3D.plot = log_function_for_3d(Axes3D.plot) +Axes3D.plot_surface = log_function_for_3d(Axes3D.plot_surface) +Axes3D.bar3d = log_function_for_3d(Axes3D.bar3d) +Axes3D.bar = log_function_for_3d(Axes3D.bar) +Axes3D.add_collection3d = log_function_for_3d(Axes3D.add_collection3d) + +# barh test +# draw a simple barh plot +# fig, ax = plt.subplots() +# ax.barh(np.arange(5), np.random.rand(5)) +# ax.barh(np.arange(5), np.random.rand(5)) +# plt.show() + +# axhline test +# fig, ax = plt.subplots() +# ax.axhline(0.5) +# ax.axhline(0.8) +# plt.show() + +# axvline test +# fig, ax = plt.subplots() +# ax.axvline(0.5) +# ax.axvline(0.8) +# plt.show() + +# errorbar test +# fig, ax = plt.subplots() +# x = np.arange(10) +# y = np.sin(x) +# +# ax.errorbar(x, y, yerr=0.1) +# ax.errorbar(x, y, yerr=0.2) +# plt.show() + +# squarify test +# fig, ax = plt.subplots() +# sizes = [50, 25, 25] +# squarify.plot(sizes=sizes, ax=ax) +# plt.savefig("tmp.png") +# plt.show() + +# loglog test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 100) +# y = x**2 +# ax.loglog(x, y) +# plt.show() + +# fill_between test +# fig, ax = plt.subplots() +# x = np.arange(10) +# y1 = np.sin(x) +# y2 = np.cos(x) +# ax.fill_between(x, y1, y2, cmap='viridis') +# plt.show() + +# fill_betweenx test +# fig, ax = plt.subplots() +# x = np.arange(10) +# y1 = np.sin(x) +# y2 = np.cos(x) +# ax.fill_betweenx(x, y1, y2, cmap='viridis') +# plt.show() + +# pie test +# fig, ax = plt.subplots() +# sizes = [50, 25, 25] +# ax.pie(sizes) +# plt.savefig("tmp.png") +# plt.show() + +# axvspan test +# fig, ax = plt.subplots() +# ax.axvspan(0.2, 0.3, color='red', alpha=0.5) +# ax.axvspan(0.5, 0.7, 
color='blue', alpha=0.5) +# plt.show() + +# axhspan test +# fig, ax = plt.subplots() +# ax.axhspan(0.2, 0.3, color='red', alpha=0.5) +# ax.axhspan(0.5, 0.7, color='blue', alpha=0.5) +# plt.show() + + +# hlines test +# fig, ax = plt.subplots() +# y_values = [1, 2, 3, 4, 5] +# xmin = 0 +# xmax = 10 +# ax.hlines(y=y_values, xmin=xmin, xmax=xmax, linestyles='dashed') +# ax.set_xlabel('X-axis') +# ax.set_ylabel('Y-axis') +# plt.savefig("tmp.png") +# plt.show() + +# vlines test +# fig, ax = plt.subplots() +# x_values = [1, 2, 3, 4, 5] +# ymin = 0 +# ymax = 10 +# ax.vlines(x=x_values, ymin=ymin, ymax=ymax, linestyles='dashed') +# ax.set_xlabel('X-axis') +# ax.set_ylabel('Y-axis') +# plt.savefig("tmp.png") +# plt.show() + +# boxplot test +# fig, ax = plt.subplots() +# data = np.random.rand(10, 3) +# ax.boxplot(data, patch_artist=True) +# plt.savefig("tmp.png") +# plt.show() + +# violin test +# fig, ax = plt.subplots() +# data = np.random.rand(10, 3) +# ax.violinplot(data) +# plt.savefig("tmp.png") +# plt.show() + +# hist test +# fig, ax = plt.subplots() +# data = np.random.rand(100, 1) +# ax.hist(data, bins=10) +# plt.savefig("tmp.png") +# plt.show() + + +# networkx test +# fig, ax = plt.subplots() +# G = networkx.complete_graph(5) +# draw the graph, give each node a different color, and a label. 
make the edges red and blue, with labels +# networkx.draw(G, ax=ax, node_color='r', edge_color='b', labels={0: '0', 1: '1', 2: '2', 3: '3', 4: '4'}) +# plt.savefig("tmp.png") +# plt.show() + +# quiver test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 10) +# y = np.linspace(0, 10, 10) +# u = np.zeros(10) +# v = np.ones(10) +# # draw the quiver plot, with color red +# ax.quiver(x, y, u, v, color='r') +# plt.savefig("tmp.png") +# plt.show() + +# 3d scatter test +# fig, ax = plt.subplots(subplot_kw={'projection': '3d'}) +# x = np.random.rand(10) +# y = np.random.rand(10) +# z = np.random.rand(10) +# draw the scatter plot, with color red +# ax.scatter3D(x, y, z, c='#ff2395') +# plt.savefig("tmp.png") +# plt.show() + +# 3d plot test +# fig, ax = plt.subplots(subplot_kw={'projection': '3d'}) +# draw two lines in 3d, with color red and blue +# ax.plot([0, 1], [0, 1], [0, 1], color='r') +# ax.plot([0, 1], [0, 1], [1, 0], color='b') + +# 3d plot_surface test +# fig, ax = plt.subplots(subplot_kw={'projection': '3d'}) +# draw a surface plot, with a beautiful colormap +# X = np.linspace(-5, 5, 100) +# Y = np.linspace(-5, 5, 100) +# X, Y = np.meshgrid(X, Y) +# Z = np.sin(np.sqrt(X**2 + Y**2)) +# ax.plot_surface(X, Y, Z, cmap='viridis') +# plt.savefig("tmp.png") +# plt.show() + +# 3d bar test +# fig, ax = plt.subplots(subplot_kw={'projection': '3d'}) +# x = np.arange(10) +# y = np.random.rand(10) +# z = np.zeros(10) +# dx = np.ones(10) +# dy = np.ones(10) +# dz = np.random.rand(10) +# # draw the 3d bar plot, with color red +# ax.bar3d(x, y, z, dx, dy, dz) +# plt.savefig("tmp.png") +# plt.show() + +# # bar2d in axes3d test +# fig, ax = plt.subplots(subplot_kw={'projection': '3d'}) +# x = np.arange(10) +# y = np.random.rand(10) +# z = np.zeros(10) +# dx = np.ones(10) +# dy = np.ones(10) +# dz = np.random.rand(10) +# # draw the 2d bar plot, with color red +# ax.bar(x, y, z, zdir='y', color=['r', 'b', 'g', 'y', 'm', 'c', 'k', 'w', 'r', 'b']) +# plt.savefig("tmp.png") +# 
plt.show() + + +# plot in test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 100) +# y = np.sin(x) +# draw the plot, with color red +# ax.plot(x, y, color='r') +# plt.savefig("tmp.png") +# plt.show() + +# matshow in test +# fig, ax = plt.subplots() +# data = np.random.rand(10, 10) +# draw the matshow plot, with a beautiful colormap +# ax.imshow(data, cmap='pink') +# plt.savefig("tmp.png") +# plt.show() + +# pcolor in test +# fig, ax = plt.subplots() +# data = np.random.rand(10, 10) +# draw the pcolor plot, with a beautiful colormap +# ax.pcolor(data) +# plt.savefig("tmp.png") +# plt.show() + +# # contour in test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 100) +# y = np.linspace(0, 10, 100) +# X, Y = np.meshgrid(x, y) +# Z = np.sin(X) * np.cos(Y) +# # draw the contour plot, with a beautiful colormap +# ax.contour(X, Y, Z) +# plt.savefig("tmp.png") +# plt.show() + +# # contourf in test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 100) +# y = np.linspace(0, 10, 100) +# X, Y = np.meshgrid(x, y) +# Z = np.sin(X) * np.cos(Y) +# # draw the contourf plot, with a beautiful colormap +# ax.contourf(X, Y, Z, cmap='viridis') +# plt.savefig("tmp.png") +# plt.show() + +# stackplot in test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 100) +# y1 = np.sin(x) +# y2 = np.cos(x) +# y3 = np.tan(x) +# draw the stackplot, with beautiful colors +# ax.stackplot(x, y1, y2, y3, colors=['r', 'g', 'b']) +# plt.savefig("tmp.png") +# plt.show() + +# fill in test +# fig, ax = plt.subplots() +# x = np.linspace(0, 10, 100) +# y1 = np.sin(x) +# y2 = np.cos(x) +# draw the fill plot, with color red +# ax.fill(x, y1, color='r') +# plt.savefig("tmp.png") +# plt.show() + + +# # NonUniformImage in test +# fig, ax = plt.subplots() +# data = np.random.rand(10, 10) +# x = np.linspace(-4, 4, 9) +# y = np.linspace(-4, 4, 9) +# z = np.sqrt(x[np.newaxis, :] ** 2 + y[:, np.newaxis] ** 2) +# im = NonUniformImage(ax, interpolation='bilinear') +# im.set_data(x, y , z) +# 
# ax.add_image(im)
# plt.savefig("tmp.png")
# plt.show()

# broken_barh in test
# fig, ax = plt.subplots()
# x = np.linspace(0, 10, 100)
# y = np.sin(x)
# draw the broken_barh plot, with color red
# ax.broken_barh([(1, 2), (3, 4)], (0, 1), facecolors='r')
# plt.savefig("tmp.png")
# plt.show()


# Ellipse in test
# NOTE(review): this snippet was the only manual test left uncommented; as
# written it created a figure, wrote tmp.png and called plt.show() as an
# import-time side effect. Commented out to match the sibling snippets.
# fig, ax = plt.subplots()
# e = matplotlib.patches.Ellipse((0.5, 0.5), 0.4, 0.2, color='r')
# ax.add_patch(e)
# plt.savefig("tmp.png")
# plt.show()


# # radar plot in test
# fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
# theta = np.linspace(0, 2*np.pi, 100)
# r = np.sin(3*theta)**2
# # draw the radar plot, with color red
# ax.plot(theta, r, color='r')
# plt.savefig("tmp.png")
# plt.show()


# import numpy as np; np.random.seed(0)

# import matplotlib.pyplot as plt
# from matplotlib.lines import Line2D

# # ===================
# # Part 2: Data Preparation
# # ===================
# # Data for PC1 and PC2
# values_pc1 = [0.8, 0.7, 0.6, 0.85, 0.9, 0.75, 0.7, 0.65, 0.8, 0.9]
# values_pc2 = [0.6, 0.55, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15]
# num_vars = len(values_pc1)

# # Compute angle for each axis
# angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()

# # The plot is circular, so we need to "complete the loop" and append the start to the end.
+# values_pc1 += values_pc1[:1] +# values_pc2 += values_pc2[:1] +# angles += angles[:1] + +# # =================== +# # Part 3: Plot Configuration and Rendering +# # =================== +# # Draw the radar chart +# fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(polar=True)) +# ax.fill(angles, values_pc1, color="black", alpha=0.1) +# ax.plot(angles, values_pc1, color="black", linewidth=2, label="Loadings PC1") +# ax.scatter(angles[:-1], values_pc1[:-1], color="black", s=50) +# ax.fill(angles, values_pc2, color="red", alpha=0.1) +# ax.plot(angles, values_pc2, color="red", linewidth=2, label="Loadings PC2") +# ax.scatter(angles[:-1], values_pc2[:-1], color="red", s=50) + +# # Add labels to the plot +# ax.set_yticklabels([]) +# grid_angles = np.linspace(0, 2 * np.pi, 8, endpoint=False) +# ax.set_xticks(grid_angles) +# angle_labels = [f"{i*45}°" for i in range(8)] +# ax.set_xticklabels(angle_labels) + +# # Add grid lines and labels for the concentric circles +# ax.set_rgrids( +# [0.2, 0.4, 0.6, 0.8, 1.0], +# labels=["0.2", "0.4", "0.6", "0.8", "1.0"], +# angle=30, +# color="black", +# size=10, +# ) + +# # Create legend handles manually +# legend_elements = [ +# Line2D( +# [0], +# [0], +# color="black", +# linewidth=2, +# marker="o", +# markersize=8, +# label="Loadings PC1", +# ), +# Line2D( +# [0], +# [0], +# color="red", +# linewidth=2, +# marker="o", +# markersize=8, +# label="Loadings PC2", +# ), +# ] + +# # Add legend and title +# ax.legend( +# handles=legend_elements, loc="upper right", bbox_to_anchor=(1.1, 1.1), frameon=False +# ) + +# # =================== +# # Part 4: Saving Output +# # =================== +# # Adjust layout and save the plot +# plt.tight_layout() +# plt.savefig('tmp.png') + + +# poly3d in test +# import math +# import matplotlib.pyplot as plt +# import numpy as np; np.random.seed(0) + +# from matplotlib.collections import PolyCollection + +# # =================== +# # Part 2: Data Preparation +# # =================== +# # Fixing random 
state for reproducibility +# def polygon_under_graph(x, y): +# """ +# Construct the vertex list which defines the polygon filling the space under +# the (x, y) line graph. This assumes x is in ascending order. +# """ +# return [(x[0], 0.0), *zip(x, y), (x[-1], 0.0)] + + +# x = np.linspace(0.0, 10.0, 31) +# vaccination_numbers = range(1, 4) + +# # verts[i] is a list of (x, y) pairs defining polygon i. +# gamma = np.vectorize(math.gamma) +# verts = [ +# polygon_under_graph(x, v**x * np.exp(-v) / gamma(x + 1)) +# for v in vaccination_numbers +# ] + +# # =================== +# # Part 3: Plot Configuration and Rendering +# # =================== +# ax = plt.figure(figsize=(8, 6)).add_subplot(projection="3d") +# facecolors = plt.colormaps["viridis_r"](np.linspace(0, 1, len(verts))) + +# poly = PolyCollection(verts, facecolors=facecolors, alpha=0.7) +# ax.add_collection3d(poly, zs=vaccination_numbers, zdir="y") + +# ax.set( +# xlim=(0, 10), +# ylim=(1, 4), +# zlim=(0, 0.35), +# xlabel="Age", +# ylabel="Vaccination Number", +# zlabel="Incidence Rate", +# ) + +# ax.set_yticks([1, 2, 3]) +# ax.set_box_aspect(aspect=None, zoom=0.8) + +# # =================== +# # Part 4: Saving Output +# # =================== +# plt.tight_layout() +# plt.savefig('3d_14.pdf', bbox_inches='tight') + + +drawed_colors = set(drawed_colors) +print("drawed_colors", drawed_colors) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..ec056bcee09526a1b8526499f357862010850fcf --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/chart_type_evaluator.py @@ -0,0 +1,179 @@ +# flake8: noqa +from typing import Dict +# from dotenv import load_dotenv +# load_dotenv() + +import os +from ..eval_configs.global_config import run_script_safe + + +class ChartTypeEvaluator: + + def 
__init__(self): + self.metrics = { + "precision": 0, + "recall": 0, + "f1": 0 + } + + def __call__(self, generation_code_file, golden_code_file): + generation_chart_types = self._get_chart_types(generation_code_file) + golden_chart_types = self._get_chart_types(golden_code_file) + + self.golden_code_file = golden_code_file + + self._calculate_metrics(generation_chart_types, golden_chart_types) + + # [TAG] What is this for? + # redunant_file = os.environ["VLMEVAL_CHARTMIMIC_UTILS_PATH"] + "/" + os.path.basename(golden_code_file).replace(".py", ".pdf") + # print(f"redunant_file: {redunant_file}") + # breakpoint() + # # if os.path.exists(redunant_file) == True: + # os.remove(redunant_file) + + # print(self.metrics) + + def _get_chart_types(self, code_file): + + with open(code_file, "r") as f: + lines = f.readlines() + code = "".join(lines) + + prefix = self._get_prefix() + output_file = code_file.replace(".py", "_log_chart_types.txt") + suffix = self._get_suffix(output_file) + code = prefix + code + suffix + + code_log_chart_types_file = code_file.replace( + ".py", "_log_chart_types.py") + with open(code_log_chart_types_file, "w") as f: + f.write(code) + + # os.system(f"python {code_log_chart_types_file}") + success = run_script_safe(code_log_chart_types_file) + if not success: + print("Skip downstream logic due to previous failure.") + # optionally return default result or continue + + if os.path.exists(output_file): + with open(output_file, "r") as f: + chart_types = f.read() + chart_types = eval(chart_types) + os.remove(output_file) + else: + chart_types = {} + os.remove(code_log_chart_types_file) + + # pdf_file = re.findall(r"plt\.savefig\('(.*)'\)", code) + # if len(pdf_file) != 0: + # pdf_file = pdf_file[0].split(",")[0][:-1] + # print(pdf_file) + # if os.path.basename(pdf_file) == pdf_file: + # os.remove(pdf_file) + + return chart_types + + def _calculate_metrics( + self, generation_chart_types: Dict[str, int], golden_chart_types: Dict[str, int]): + """ + 
Calculate precision, recall, and f1 score of the chart types. + + Args: + - generation_chart_types: Dict[str, int] + - key: chart type + - value: number of times the chart type is called + - golden_chart_types: Dict[str, int] + - key: chart type + - value: number of times the chart type is called + """ + if len(generation_chart_types) == 0: + return + + n_correct = 0 + total = sum(generation_chart_types.values()) + + for chart_type, count in generation_chart_types.items(): + if chart_type in golden_chart_types: + n_correct += min(count, golden_chart_types[chart_type]) + + self.metrics["precision"] = n_correct / total + try: + self.metrics["recall"] = n_correct / \ + sum(golden_chart_types.values()) + except BaseException: + print( + "<<<<<<<<<<<<<<<<<<< list: + """ + Parallel execution with progress tracking and safe interim saving. + """ + # Prepare persistent storage + if save: + os.makedirs(os.path.dirname(save), exist_ok=True) + if not os.path.exists(save): + dump({}, save) + res = load(save) + else: + res = {} + + results = [None] * len(tasks) + future_to_idx = {} + + # Use process pool to bypass GIL for CPU-bound tasks + with ProcessPoolExecutor(max_workers=nproc) as executor: + for idx, inp in enumerate(tasks): + # Support dict, tuple/list, or single-value tasks + if isinstance(inp, dict): + future = executor.submit(func, **inp) + elif isinstance(inp, (list, tuple)): + future = executor.submit(func, *inp) + else: + future = executor.submit(func, inp) + future_to_idx[future] = idx + + # Display progress bar as tasks complete + with tqdm(total=len(tasks)) as pbar: + for future in as_completed(future_to_idx): + idx = future_to_idx[future] + key = keys[idx] if keys else None + try: + result = future.result() + except Exception as e: + exc_type = type(e).__name__ + err_msg = f"[{exc_type}] Exception in task {key or idx}: {str(e)}" + logger.error(err_msg) + logger.error("Full traceback:") + logger.error(traceback.format_exc()) + + # Optional: attach traceback to 
from collections import defaultdict

import numpy as np
import pandas as pd


def report_acc_hrbench(df):
    """Aggregate HRBench accuracy per evaluation cycle and task type.

    Args:
        df: DataFrame with at least ``cycle_category``, ``category`` and
            ``hit`` columns (one row per evaluated sample; ``hit`` is 1 for a
            correct answer, 0 otherwise).

    Returns:
        DataFrame with columns ``cycle`` / ``type`` / ``accuracy``: one row per
        (cycle, task type) pair, followed by an ``Average`` row per task type
        holding the mean accuracy over all cycles.
    """
    result_dic = defaultdict(list)
    # Running sum of per-cycle accuracy for each task type, averaged at the end.
    avg_dic = defaultdict(int)

    n_cycles = 0
    for cycle_key, cycle_df in df.groupby('cycle_category'):
        n_cycles += 1
        _, resp_dic = hrbench_score(cycle_df)
        for task_type, accuracy in resp_dic.items():
            result_dic['cycle'].append(cycle_key)
            result_dic['type'].append(task_type)
            result_dic['accuracy'].append(accuracy)
            avg_dic[task_type] += accuracy

    for task_type, acc_sum in avg_dic.items():
        result_dic['cycle'].append('Average')
        result_dic['type'].append(task_type)
        result_dic['accuracy'].append(acc_sum / n_cycles)

    return pd.DataFrame(result_dic)


def hrbench_score(data):
    """Score one slice of HRBench results.

    Args:
        data: DataFrame with ``category`` and ``hit`` columns.

    Returns:
        Tuple of (DataFrame with ``type``/``acc`` rows, dict mapping each
        category -- plus the synthetic ``'all'`` bucket -- to its mean
        accuracy). Categories are emitted in sorted order so the row order is
        deterministic (previously it followed ``set`` iteration order).
    """
    score_dict = defaultdict(list)
    for _, row in data.iterrows():
        score_dict[row['category']].append(row['hit'])
        score_dict['all'].append(row['hit'])

    ret = defaultdict(list)
    resp_dic = {}
    # Emit the overall bucket first, then each category deterministically.
    for cate in ['all'] + sorted(k for k in score_dict if k != 'all'):
        acc = np.mean(score_dict[cate])
        ret['type'].append(cate)
        ret['acc'].append(acc)
        resp_dic[cate] = acc

    return pd.DataFrame(ret), resp_dic
import pandas as pd
from collections import defaultdict


FAIL_MSG = 'Failed to obtain answer via API.'


def build_prompt_logicvista(line):
    """Build the judge prompt that extracts the chosen letter(s) from a model answer.

    Args:
        line: mapping (e.g. a pandas Series) with ``question`` and
            ``prediction`` fields.

    Returns:
        The fully formatted extraction prompt string.
    """
    question = line['question']
    prediction = str(line['prediction'])
    tmpl = (
        "You are a information extractor that extracts multiple choice letter answer choices "
        "from a paragraph that contains the answer choice and sometimes explaination of why that "
        "choice is correct to the given question.\n"
        "What letter did the following answer choose? If the answer did not select a letter answer choice, "
        "first try to infer the answer based off the given choices.\n"
        "If it does not seem like the given answer corresponds to an answer choice OR if there is no selected answer, please just respond with Z.\n"  # noqa: E501
        "Make sure you answer with ONLY the letters chosen.\n"
        'Example 1: \n'
        'Question: \nWhat is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n\n'
        'Answer: \na cute teddy bear\n\nYour output: A\n'
        'Example 2: \n'
        'Question: \nWhat is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog\n\n'
        'Answer: \nSpider\n\nYour output: Z\n'
        'Example 3: \n'
        'Question: \nWhich figure is a rotation of the object?\n\n'
        'Answer: \nThe figure on the right, labeled "D," is a rotation of the object shown in the top left corner.\n\nYour output: D\n'  # noqa: E501
        'Example 4: \n'
        'Question: \nWhich of the boxes comes next in the sequence? Select from A-E\n\n'
        'Answer: \nThe sequence of the boxes is A, B, C, D, E.\n\nYour output: ABCDE\n'
        'Example 5: \n'
        'Question: \n{}\n\nAnswer: \n{}\n\nYour output: '
    )

    return tmpl.format(question, prediction)


def LogicVista_auxeval(model, line):
    """Use the judge model to extract the chosen letter(s) and score them.

    Retries up to 5 times with increasing temperature until the judge returns
    a string of uppercase letters (the chosen option(s), or Z for "none").

    Args:
        model: judge model exposing ``generate(prompt, temperature=...)``.
        line: mapping with ``question``, ``prediction`` and ``answer`` fields;
            ``answer`` is a comma-space separated list of correct letters.

    Returns:
        dict with ``log`` (parse trace), ``res`` (raw judge output) and
        ``hit`` (1 iff the extracted letter set equals the ground truth).
    """
    prompt = build_prompt_logicvista(line)
    log = ''
    retry = 5

    # Canonical ground truth: lowercase letters, sorted and joined, so that
    # multi-letter answers compare independently of order/case.
    answer = ''.join(sorted(a.lower() for a in line['answer'].split(', ')))

    for i in range(retry):
        res = model.generate(prompt, temperature=i * 0.5)
        if FAIL_MSG in res or not (res.isupper() and res.isalpha()):
            # Log the judge output (what actually failed to parse).
            log += f'Try {i}: output is {res}, failed to parse.\n'
            continue
        log += 'Succeed'
        extracted = ''.join(sorted(ch.lower() for ch in res))
        return dict(log=log, res=res, hit=int(extracted == answer))

    log += 'All 5 retries failed.\n'
    return dict(log=log, res='', hit=0)


cat = ["diagram", "ocr", "patterns", "graphs", "tables", "3d shapes", "puzzles", "sequences", "physics"]


def evaluate_logicvista(file_path):
    """Aggregate LogicVista accuracy overall and per reasoning skill.

    Args:
        file_path: path to the scored result file (loadable via vlmeval's
            ``load``; must contain ``skill`` and ``hit`` columns).

    Returns:
        DataFrame with columns ``Task&Skill`` / ``tot`` / ``hit`` / ``acc``.
    """
    # Project helper; imported lazily so the module stays importable standalone.
    from ...smp import load

    df = load(file_path)

    tot, hit, acc = {}, {}, {}

    def _register(name, sub):
        # Guard against empty subsets so we never divide by zero.
        n = sub.shape[0]
        correct = sub['hit'].sum()
        tot[name] = n
        hit[name] = correct
        acc[name] = (correct / n) * 100 if n else 0.0

    _register('Overall', df)
    # One stanza per skill, replacing the previous five copy-pasted blocks.
    for skill in ('inductive', 'deductive', 'numerical', 'spatial', 'mechanical'):
        _register(skill, df[df['skill'].str.contains(skill)])

    # capability dimension intentionally omitted: the official data json does
    # not contain a 'capability' column (see `cat` above for the would-be keys).

    res = defaultdict(list)
    for k in tot:
        res['Task&Skill'].append(k)
        res['tot'].append(tot[k])
        res['hit'].append(hit[k])
        res['acc'].append(acc[k])
    return pd.DataFrame(res)
from numbers import Number
from typing import Dict

import numpy as np


class MeanAggregation:
    """Aggregate field scores by taking their weighted mean."""

    @staticmethod
    def aggregate(scores: Dict[str, Number], weights: Dict[str, Number]) -> Number:
        """Weighted mean over fields with non-negative scores; -1 if none remain."""
        valid = {field: score for field, score in scores.items() if score >= 0}
        if not valid:
            return -1

        # Pull scores and weights out in the same key order so they stay aligned.
        ordered_fields = list(valid)
        return np.average(
            [valid[field] for field in ordered_fields],
            weights=[weights[field] for field in ordered_fields],
        )


class UnsupportedAggregation:
    """Placeholder aggregator for metrics with no meaningful aggregation."""

    @staticmethod
    def aggregate(scores: Dict[str, Number], weights: Dict[str, Number]) -> Number:
        return -1


import logging
from .common.conversions import str_to_set
from .common.metrics import jaccard_index


def chess_transform(move_sequence: str) -> set:
    """Parse a SAN move-sequence string into a set of normalized moves.

    Trailing '!' and then '#' annotations are stripped from each move.
    """
    moves = str_to_set(move_sequence)
    return {move.removesuffix("!").removesuffix("#") for move in moves}


class ChessMoveJaccard:
    """Calculates the Jaccard index for chess moves."""

    @classmethod
    def match(cls, responses: str | None, targets: str) -> float:
        """Jaccard similarity between response and target move sets (0 for None)."""
        if responses is None:
            return 0
        response_moves = chess_transform(responses)
        target_moves = chess_transform(targets)
        return jaccard_index(response_moves, target_moves)
0000000000000000000000000000000000000000..88edc15a8279068be02758d8409ed7ad0f957d02 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/constrained_generation.py @@ -0,0 +1,518 @@ +import collections +import itertools +import logging +from numbers import Number +import re +from nltk.tokenize import sent_tokenize, word_tokenize +from nltk.stem import WordNetLemmatizer +from nltk.corpus import wordnet +import pronouncing +from .common.conversions import str_to_iterable +from ..parsing.common.parsers import parse_nested_str_list, parse_syllable_ranges +import signal + + +def custom_lemmatize(word, lemmatizer): + """ + Custom lemmatization to handle special cases like 'puppies' -> 'puppy'. + """ + lemma = lemmatizer.lemmatize(word, wordnet.NOUN) + + # Handle irregular plural forms manually + if word.endswith("ies") and lemma.endswith("y"): + lemma = lemma[:-1] + "y" + elif word.endswith("ves") and lemma.endswith("f"): + lemma = lemma[:-1] + "f" + + return lemma + + +def lemmatize_phrase(phrase, lemmatizer): + """ + Lemmatizes a phrase (multiple words). 
+ """ + words = word_tokenize(phrase.lower()) + lemmatized_words = [custom_lemmatize(word, lemmatizer) for word in words] + return " ".join(lemmatized_words) + + +custom_phones_for_word = { + "'gainst": ["G EH1 N S T", "G EY1 N S T"], + "'midst": ["M IH1 D S T"], + "'mongst": ["M AH1 NG S T"], + "'neath": ["N IY1 TH"], + "beguiles": ["B IH0 G AY1 L Z"], + "cerulean": ["S ER0 UW1 L IY0 AH0 N"], + "doggo": ["D AO1 G OW0"], + "downtorn": ["D AW1 N T AO2 R N"], + "enthrall": ["EH0 N TH R AO1 L"], + "fam'ly": ["F AE1 M L IY0"], + "fiery": ["F AY1 ER0 IY0", "F AY1 R IY0"], + "flits": ["F L IH1 T S"], + "furred": ["F ER1 D"], + "kneels": ["N IY1 L Z"], + "o'er": ["OW1 ER0"], + "orbs": ["AO1 R B Z"], + "quenched": ["K W EH1 N CH D"], + "quietude": ["K W AY1 AH0 T UW0 D"], + "retold": ["R IY0 T OW1 L D"], + "scurries": ["S K ER1 IY0 Z"], + "sunbeams": ["S AH1 N B IY2 M Z"], + "syncs": ["S IH1 NG K S"], + "'twixt": ["T W IH1 K S T"], +} + + +file_logger = logging.getLogger("errorLogger") + + +def phones_for_word(text: str) -> list[str]: + """A wrapper for pronouncingpy's phones_for_word to handle out-of-vocab issues.""" + text = text.replace("’", "'").lower() + + suffixes = [""] + prefixes = [""] + prefixes_to_remove = [""] + if text.endswith("'s"): + suffixes = [" Z"] + text = text.removesuffix("'s") + + if text in custom_phones_for_word: + return [ + pr + suffix + for pr, suffix in itertools.product(custom_phones_for_word[text], suffixes) + ] + + pronunciations = pronouncing.phones_for_word(text) + + # Guess pronunciation from word stem + if not pronunciations: + if suffixes[0] != "": # "'s doesn't really work with the rest." + file_logger.error(f"OOV: {text}") + return [] + + if text.endswith("ed"): + suffixes = [" D", " T", " AH0 D", " IH0 D"] + text = text.removesuffix("ed") + elif text.endswith("s"): + # Some words, like bustles, end with es but the plural suffix is s. 
+ if pronouncing.phones_for_word(text.removesuffix("s")): + # On the other hand, pierces is two syllables but pierce is one. + if text.endswith("es"): + suffixes = [" S", " Z", " AH0 Z", " IH0 Z"] + text = text.removesuffix("s") + else: + suffixes = [" S", " Z"] + text = text.removesuffix("s") + elif text.endswith("es"): + suffixes = [" AH0 Z", " IH0 Z"] + text = text.removesuffix("es") + if text.startswith("un"): + prefixes = ["AH0 N "] + text = text.removeprefix("un") + elif text.startswith("'"): + if pronouncing.phones_for_word("a" + text.removeprefix("'")): + prefixes_to_remove = ["AH0 "] + text = "a" + text.removeprefix("'") + pronunciations = pronouncing.phones_for_word(text) + pronunciations = [ + (prefix + pr + suffix).removeprefix(prefix_to_remove) + for prefix, pr, suffix, prefix_to_remove in itertools.product( + prefixes, pronunciations, suffixes, prefixes_to_remove + ) + ] + + if not pronunciations: + file_logger.error(f"OOV: {text}") + return pronunciations + + +def rhyming_part_include_unstressed(phones: str) -> str: + """Get the "rhyming part" of a string with CMUdict phones. + + "Rhyming part" here means everything from the vowel in the + last syllable up to the end of the word. 
def rhyming_part_include_unstressed(phones: str) -> str:
    """Get the "rhyming part" of a string with CMUdict phones.

    "Rhyming part" here means everything from the vowel in the last
    syllable up to the end of the word, with the stress digits removed so
    that stressed and unstressed variants compare equal.

    Example:
        >>> import pronouncing
        >>> phones = pronouncing.phones_for_word("purple")
        >>> rhyming_part_include_unstressed(phones[0])
        'AH L'

    Args:
        phones: a string containing space-separated CMUdict phones

    Returns:
        a string with just the "rhyming part" of those phones
    """
    # Fix: the doctest previously claimed 'AH0 L', but the final re.sub
    # strips stress digits, so the actual result is 'AH L'.
    phones_list = phones.split()
    # Scan backwards for the last vowel; vowel phones end in a stress digit.
    # Index 0 is deliberately excluded: if the only vowel is the first phone,
    # the whole string is the rhyming part.
    for i in range(len(phones_list) - 1, 0, -1):
        if phones_list[i][-1] in "012":
            phones = " ".join(phones_list[i:])
            break
    return re.sub(r"\d", "", phones)


def count_syllables(text: str) -> list[int]:
    """Count the number of syllables in a piece of text.

    Returns one count per combination of per-word pronunciations, since
    words may have pronunciations with different syllable counts.
    """
    pronunciations = [phones_for_word(p) for p in text.split()]
    syllable_counts = []
    for pronun_possibility in itertools.product(*pronunciations):
        syllable_counts.append(
            sum([pronouncing.syllable_count(p) for p in pronun_possibility])
        )
    return syllable_counts


def find_string_occurrences_with_variations(text, search_string):
    """Count lemmatized occurrences of a phrase, per sentence.

    Returns:
        (total_count, occurrences) where occurrences is a list of
        (1-based sentence number, count in that sentence) pairs.
    """
    lemmatizer = WordNetLemmatizer()

    # Lemmatize the entire search phrase
    search_lemma = lemmatize_phrase(search_string, lemmatizer)

    # Tokenize the text into sentences
    sentences = sent_tokenize(text)

    occurrences = []
    total_count = 0

    # Iterate over each sentence
    for i, sentence in enumerate(sentences, 1):  # Sentence numbers start from 1
        # Lemmatize the entire sentence
        lemmatized_sentence = lemmatize_phrase(sentence, lemmatizer)

        # Count occurrences of the lemmatized search phrase in the lemmatized sentence
        count_in_sentence = lemmatized_sentence.count(search_lemma)
        if count_in_sentence > 0:
            occurrences.append((i, count_in_sentence))
            total_count += count_in_sentence

    return total_count, occurrences


def word_to_stresses(word: str) -> list[list[int]]:
    """Convert a word to a list of stresses, for each valid pronunciation.

    Each inner list holds the stress digits (0/1/2) of one pronunciation.
    Duplicate stress patterns are deduplicated via a set, so the order of
    the returned patterns is unspecified.
    """
    pronunciations = phones_for_word(word)
    stresses = {
        tuple(int(stress) for stress in pronouncing.stresses(pronunc))
        for pronunc in pronunciations
    }
    return [list(pronunc_stresses) for pronunc_stresses in stresses]


def is_iambic_pair(stress1: int, stress2: int) -> bool:
    """Whether the pair of stresses is a valid iambic pair.

    Secondary stress (2) is treated leniently: it may pair with primary
    stress on either side, and 0-0 / 1-1 pairs are also accepted.
    """
    valid_pairs = {(2, 1), (0, 2), (0, 1), (0, 0), (1, 1), (2, 2)}
    return (stress1, stress2) in valid_pairs


def grouper_ignore_last(iterable, n):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF
    # A trailing partial chunk is silently dropped (zip stops at the
    # shortest iterator).
    args = [iter(iterable)] * n
    return zip(*args)


def is_line_iambic(line: str) -> bool:
    """Determine if a line is iambic.

    Tries every combination of per-word pronunciations via backtracking;
    returns True if any combination scans as iambic with an even number
    of syllables.
    """
    words = line.split()
    stress_options = [word_to_stresses(word) for word in words]

    def backtrack(word_index: int, syllable_index: int, prev_stress: int) -> bool:
        """Using backtracking, determine if there is a sequence of pronunciations that is in iambic pentameter."""
        if word_index == len(words):
            # At this point, syllable_index is the number of syllables
            return syllable_index % 2 == 0

        for stress_pattern in stress_options[word_index]:
            word_syllable_index = 0
            # If the previous word ended mid-foot, this word's first syllable
            # must complete that foot.
            if syllable_index % 2 != 0:
                current_stress = stress_pattern[word_syllable_index]
                if not is_iambic_pair(prev_stress, current_stress):
                    continue
                word_syllable_index += 1

            # The remaining syllables of this word must pair up iambically.
            word_valid_iambic_pairs = True
            for stress1, stress2 in grouper_ignore_last(
                stress_pattern[word_syllable_index:], 2
            ):
                if not is_iambic_pair(stress1, stress2):
                    word_valid_iambic_pairs = False
                    break
                word_syllable_index += 2
            if not word_valid_iambic_pairs:
                continue

            if word_syllable_index < len(stress_pattern):
                # One dangling syllable: carry its stress into the next word.
                assert word_syllable_index + 1 == len(stress_pattern)
                next_stress = stress_pattern[word_syllable_index]
                if backtrack(
                    word_index + 1,
                    syllable_index + word_syllable_index + 1,
                    next_stress,
                ):
                    return True
            else:
                assert word_syllable_index == len(stress_pattern)
                if backtrack(word_index + 1, syllable_index + word_syllable_index, -1):
                    return True

        return False

    return backtrack(
        0, 0, -1
    )  # Start with -1 as prev_stress as a placeholder for the first syllable
def parse_constraints(key_string, value_string):
    """Pair '##'-separated constraint names with their '##'-separated values.

    Trailing digits on key names (e.g. ``contain2``) are dropped, keys are
    lower-cased, values are stripped of surrounding whitespace and double
    quotes, and purely numeric values are converted to ``int``.

    Raises:
        ValueError: if the number of keys and values disagree.
    """
    raw_keys = key_string.strip().split("##")
    # Remove trailing numbers from each key
    cleaned_keys = [re.sub(r"\d+$", "", key) for key in raw_keys]

    # Extract and clean the value components.
    cleaned_values = [
        part.strip().strip('"').strip() for part in value_string.strip().split("##")
    ]
    # Handle cases where we expect integers.
    cleaned_values = [int(v) if v.isdigit() else v for v in cleaned_values]

    # Combine keys and values into a dictionary.
    if len(cleaned_keys) == len(cleaned_values):
        return {k.lower(): v for k, v in zip(cleaned_keys, cleaned_values)}
    if len(cleaned_keys) == 1 and len(cleaned_values) == 1:
        return {cleaned_keys[0].lower(): cleaned_values[0]}
    raise ValueError("Mismatch between number of keys and values.")
parsed_constraint["contain"]) + count, occurs = 0, [] + for item in items: + count_, occurs_ = find_string_occurrences_with_variations( + response, item + ) + if count_ > 0: + count += count_ + occurs.extend(occurs_) + if count > 0: + occurs_records[tuple(items)] = occurs + score = 0 if count == 0 else 1 + + ## Other logics like position or repeat, only check when + ## previous "contain" consraint passes + if score > 0: + occurs = list(occurs_records.values())[0] + if "position_only" in parsed_constraint: + pos = parsed_constraint["position_only"] + score = 1 if len(occurs) == 1 and occurs[0][0] == pos else 0 + return score + elif "position" in parsed_constraint: + pos = parsed_constraint["position"] + occurs_sent_ids = [item[0] for item in occurs] + score = 1 if pos in occurs_sent_ids else 0 + + # check occurance times + if "times" in parsed_constraint: + repeat_times = parsed_constraint["times"] + total_occurs = sum([item[1] for item in occurs]) + score = 1 if total_occurs == repeat_times else 0 + + elif "length" in constraint: + try: + len_constraint = int(constraint_val[1:]) + words = re.findall(r"\b\w+\b", response) + if constraint_val.strip() == "": + score = 1 ## dummy placeholder constraint, score is 1 + elif constraint_val[0] == "<": + score = 1 if len(words) < len_constraint else 0 + elif constraint_val[0] == ">": + score = 1 if len(words) > len_constraint else 0 + else: + file_logger.warning(f"Unknown length info {constraint_val}") + except ValueError: + file_logger.warning(f"Wrong length info {constraint_val}") + score = 0 + elif "acrostic" in constraint: + response = response.replace("**", "") + + lines = response.strip().lower().split("\n") + if len(lines) != len(constraint_val): + return 0 + all_match = True + if "acrostic_alliteration" in constraint: + for line, letter in zip(lines, constraint_val.lower()): + line = line.strip() + if letter == " ": + if line != "": + all_match = False + break + elif not line or not all(word[0] == letter for word in 
line.split(" ")): + all_match = False + break + else: + for line, letter in zip(lines, constraint_val.lower()): + line = line.strip() + if letter == " ": + if line != "": + all_match = False + break + elif not line or not line[0] == letter: + all_match = False + break + score = 1 if all_match else 0 + else: + response = response.strip() + response = response.replace(".", "") + response = response.replace(",", "") + response = response.replace("!", "") + response = response.replace("?", "") + response = response.replace(":", "") + response = response.replace(";", "") + response = response.replace('"', "") + response = response.replace("-", " ") + response = response.replace("—", " ") + response = re.sub( + " *\(\w\) *(?=\n|$)", "", response + ) # The parenthesized letter in the rhyming scheme + + lines = response.lower().split("\n") + match constraint: + case "syllables": + syllable_count_intervals = parse_syllable_ranges(constraint_val) + if len(lines) != len(syllable_count_intervals): + return 0 + try: + all_match = all( + any( + min_count <= syll_count <= max_count + for syll_count in count_syllables(line) + ) + for line, (min_count, max_count) in zip( + lines, syllable_count_intervals + ) + ) + except IndexError: + all_match = None + score = 1 if all_match else 0 + case "rhyming_scheme": + # Ensure that the number of lines is the same as the number in the rhyming scheme + if len(lines) != len(constraint_val): + return 0 + last_words = [] + for line in lines: + if line.strip(): # Check if line has non-whitespace content + words = line.split() + last_words.append(words[-1] if words else "") + else: + last_words.append("") + + # Map each rhyming scheme letter to the last word of a line + letter_to_words = collections.defaultdict(set) + for rhyme_letter, word in zip(constraint_val, last_words): + if rhyme_letter == " ": + if word != "": + return 0 + else: + letter_to_words[rhyme_letter].add(word) + + # Check that 1. 
The words for the same letter all rhyme + letter_to_rhyming_parts = {} + for letter, words in letter_to_words.items(): + rhyming_parts: list[set[str]] = [ + { + rhyming_part_include_unstressed(pronunciations) + for pronunciations in phones_for_word(word) + } + for word in words + ] + common_rhyming_parts = set.intersection(*rhyming_parts) + if not common_rhyming_parts: + return 0 + letter_to_rhyming_parts[letter] = common_rhyming_parts + # Check that 2. The words for different letters do not rhyme + for a, b in itertools.combinations(letter_to_rhyming_parts, 2): + # To simplify things, if there are any shared pronunciations between two different letters, we reject it + if letter_to_rhyming_parts[a] & letter_to_rhyming_parts[b]: + return 0 + score = 1 + case "poetry_meter": + all_match = all(is_line_iambic(line) for line in lines) + score = 1 if all_match else 0 + case _: + file_logger.warning(f"Unknown constraint type {constraint}") + score = 0 + + return score + + +class ConstrainedGenerationEval: + """ + Constrained generation metric + """ + + timeout = 10 + + @classmethod + def match(cls, response, constraints) -> Number: + scores = [] + eval_results = {} + + def handler(signum, frame): + raise TimeoutError() + + def check_with_timeout(constraint, constraint_val): + # Set the signal handler and a timeout + signal.signal(signal.SIGALRM, handler) + signal.alarm(cls.timeout) # Set the timeout + + try: + # Try to check the constraint + score = check_constraint(response, constraint, constraint_val) + except TimeoutError: + print(f"Timeout reached for constraint: {constraint}") + score = 0 # Set score to 0 if timeout occurs + finally: + signal.alarm(0) # Reset the alarm + + return score + + for constraint, constraint_val in constraints.items(): + score = check_with_timeout(constraint, constraint_val) + scores.append(score) + eval_results[constraint] = score + + final_score = min(scores) + eval_info = "\t".join([f"{key}: {val}" for key, val in eval_results.items()]) + 
class CoordsSequenceSimilarity:
    """
    Measure the similarity between two list of coordinates, used for keypoint estimation tasks
    """

    @staticmethod
    def compute_score(pred_keypoints, gt_keypoints, k=10):
        """
        Compute the evaluation score for keypoint estimation.

        Args:
            pred_keypoints (list or np.ndarray): Predicted keypoint coordinates,
                each as (x, y), normalized to [0, 1].
            gt_keypoints (list or np.ndarray): Ground truth keypoint coordinates,
                each as (x, y), normalized to [0, 1].
            k (numbers.Number): Exponential decay rate on the normalized distance.

        Returns:
            float: A score between 0 and 1, where 1 indicates perfect accuracy,
                   and 0 indicates completely wrong.
        """
        try:
            pred = np.array(pred_keypoints)
        except ValueError:
            # Ragged / malformed input cannot form a proper array.
            return 0

        gt = np.array(gt_keypoints)

        # A shape mismatch (different number of keypoints) scores 0 outright.
        if pred.shape != gt.shape:
            return 0

        # Per-keypoint Euclidean distance, scaled by the diagonal of the unit
        # square (the maximum possible distance in normalized coordinates).
        per_point_dist = np.linalg.norm(pred - gt, axis=1) / np.sqrt(2)

        # Exponential decay turns each distance into a [0, 1] score; average
        # over all keypoints.
        return np.mean(np.exp(-k * per_point_dist))

    @classmethod
    def match(cls, responses, targets) -> float:
        """Exact match between targets and responses."""
        logging.debug(f"{responses=}, {targets=}")
        # Parse string inputs into 2-D coordinate lists before scoring.
        if not isinstance(responses, (tuple, list)):
            responses = str_to_coords(responses, dim=2)
        if not isinstance(targets, (tuple, list)):
            targets = str_to_coords(targets, dim=2)

        return cls.compute_score(responses, targets)
class DictJaccardAggJaccard:
    """Calculates the Jaccard index, dividing by the union of the predictions.

    1. Calculates the Jaccard index for all sets with the same key,
       if it appears in either pred or targets
    2. Calculates the total, then divides by the size of the union
    """

    @classmethod
    def match(cls, responses, targets) -> float:
        """Return the aggregated Jaccard index between targets and responses.

        Args:
            responses: predicted mapping (or something castable to a dict).
            targets: ground-truth mapping of key -> iterable.

        Returns:
            Mean per-key Jaccard index over the union of keys; 0 when the
            response cannot be cast to a dict.
        """
        responses = cast_to_dict(responses)
        if not isinstance(responses, dict):
            return 0

        all_keys = set(responses) | set(targets)

        # Fix: two empty dicts previously divided by zero here; they agree
        # trivially, so treat them as a perfect match.
        if not all_keys:
            return 1.0

        total_score = sum(
            Jaccard.match(responses.get(key, []), targets.get(key, []))
            for key in all_keys
        )
        return total_score / len(all_keys)
class ExactStrMatchCaseInsensitive:
    """Case-insensitive exact string matching."""

    @staticmethod
    def match(response, correct_answer) -> int:
        """Case-insensitive exact match between targets and responses.

        Returns 0 when either argument is not a string. (The previous
        condition, ``not isinstance(response, str) and
        isinstance(correct_answer, str)``, only rejected one non-string
        combination due to operator precedence, then crashed on ``.lower()``
        for the others.)
        """
        if not (isinstance(response, str) and isinstance(correct_answer, str)):
            return 0
        return ExactStrMatch.match(response.lower(), correct_answer.lower())
+geolocator = Nominatim(user_agent=f"vlm-mega-benchmark_{USER_AGENT_SUFFIX}") + + +error_logger = logging.getLogger("errorLogger") + + +def calculate_proximity_score(guess_coords, actual_coords, k=100): + """Calculate the proximity score based on the location. + + Exponentially decreases depending on the distance. + + Args: + guess_coords (float, float): The longitude and latitude of the guessed coordinates. + actual_coords (float, float): The longitude and latitude of the actual coordinates. + k (numbers.Number): The threshold (in km) at which we get a score of 0.5. + """ + dist = distance(guess_coords, actual_coords).km + proximity_score = math.exp(-dist / k) + return proximity_score + + +GEOLOCATION_TIMEOUT = 1 +MAX_RETRIES = 30 + + +geocode = RateLimiter( + geolocator.geocode, min_delay_seconds=GEOLOCATION_TIMEOUT, max_retries=MAX_RETRIES +) + + +@functools.cache +def try_geolocate(query): + """Try to look up the location.""" + location = geocode(query) + if location is None: + error_logger.error( + f"Geolocation API request failed due to timeout: exceeded {MAX_RETRIES} retries!" 
+ ) + return location + + +def location_to_coords( + country: str, province_or_state: str, municipality: str +) -> tuple[float, float] | None: + if country == "" or province_or_state == "" or municipality == "": + return None + """Convert the location to longitude and latitude.""" + location = geolocator.geocode( + query={"country": country, "state": province_or_state, "city": municipality} + ) + if location is not None: + return (location.latitude, location.longitude) + # Try searching without the province/state, as it can be non-standard for some questions + location = geolocator.geocode(query={"country": country, "city": municipality}) + if location is None: + return None + return (location.latitude, location.longitude) + + +class GeoProximityLocationDict: + """Return a score based on the distance between two locations.""" + + @classmethod + def match(cls, responses, targets) -> float: + """Return a score based on how far two targets are away from each other, + where each field is a dict with the following schema: + { + country: str, + province_or_state: str, + municipality: str + } + """ + try: + guess_coords = location_to_coords(**responses) + except: + return 0 + + if guess_coords is None: + error_logger.error( + f"GeoProximityLocationDict: could not load co-ordinates for {responses=}" + ) + return 0 + actual_coords = location_to_coords(**targets) + if actual_coords is None: + error_logger.error( + f"GeoProximityLocationDict: could not load co-ordinates for {targets=}" + ) + return 0 + + return calculate_proximity_score(guess_coords, actual_coords) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/gleu.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/gleu.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb514fba2da103c9c22235fd9aa2c83ca83e0c7 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/gleu.py @@ -0,0 +1,17 @@ +from numbers import Number +import jieba +from 
class GLEUChinese:
    """Compute GLEU score for Chinese text."""

    @staticmethod
    def match(response, correct_answer) -> Number:
        """Compute the sentence-level GLEU score between two strings.

        Fixes over the previous version: the docstring claimed "BLEU", and
        the arguments to ``sentence_gleu`` were swapped — the ground truth
        ``correct_answer`` is the reference and the model ``response`` is
        the hypothesis. Both are segmented with jieba's search-mode cut.

        Returns 0 when either input is not a string.
        """
        if not (isinstance(response, str) and isinstance(correct_answer, str)):
            return 0
        reference_tokens = list(jieba.cut_for_search(correct_answer))
        hypothesis_tokens = list(jieba.cut_for_search(response))
        return sentence_gleu([reference_tokens], hypothesis_tokens)
item.lower() + .replace(" ", "") + .replace("-", "") + .replace("\n", "") + .replace("\t", "") + .replace("_", "") + .replace(".", "") + for item in res + ] + ) + new_responses.add(new_res) + except: # the data type of the response might be wrong, return 0 in this case + return 0 + for tgt in targets: + new_tgt = tuple( + [ + item.lower() + .replace(" ", "") + .replace("-", "") + .replace("\n", "") + .replace("\t", "") + .replace("_", "") + .replace(".", "") + for item in tgt + ] + ) + new_targets.add(new_tgt) + else: + return 0 + + return jaccard_index(new_responses, new_targets) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/latex_expr_equality.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/latex_expr_equality.py new file mode 100644 index 0000000000000000000000000000000000000000..79a14a3dde269df68f76e572e67693844f294a61 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/latex_expr_equality.py @@ -0,0 +1,96 @@ +import re +from sympy.parsing.latex import parse_latex +from sympy.parsing.latex.errors import LaTeXParsingError +from sympy.core.sympify import SympifyError +from .common.transformations import normalize_latex +from .simple_str_match import SimpleStrMatch +import signal + + +class TimeoutException(Exception): + pass + + +def timeout_handler(signum, frame): + raise TimeoutException() + + +class LatexExprEquality: + """Determines if two LaTeX expressions are equal.""" + + @classmethod + def match(cls, responses, targets, timeout_duration=15) -> int: + """Whether two LaTeX expressions are equal.""" + if not isinstance(responses, str) or not isinstance(targets, str): + return 0 + signal.signal(signal.SIGALRM, timeout_handler) + signal.alarm(timeout_duration) # Set the timeout duration in seconds + try: + # seems that this eval can get stuck when evaluating all tasks.. 
def separate_text_and_latex(text):
    """Split *text* into its plain-text pieces and its ``$...$`` LaTeX pieces.

    Returns:
        (plain_text, latex_content): the stripped non-LaTeX fragments and
        the LaTeX fragments (delimiters included), in document order.
    """
    latex_content = []
    plain_text = []
    # The capture group makes re.split keep the $...$ spans in its output.
    for piece in re.split(r"(\$[^$]*\$)", text):
        if piece.startswith("$") and piece.endswith("$"):
            latex_content.append(piece)
        else:
            plain_text.append(piece.strip())
    return plain_text, latex_content


def join_latex(latex_exps):
    """Strip ``$`` delimiters and commas from each expression, join with spaces."""
    stripped = [exp[1:-1].strip().replace(",", "") for exp in latex_exps]
    return " ".join(stripped)


class TextLatexExprEquality:
    """Determines if two LaTeX expressions are equal."""

    @classmethod
    def match(cls, responses, targets) -> int:
        """Whether two strings mixing plain text and ``$...$`` LaTeX are equal.

        The plain-text parts are compared with SimpleStrMatch (commas
        ignored) and the LaTeX parts symbolically with LatexExprEquality;
        both must match.
        """
        if not (isinstance(responses, str) and isinstance(targets, str)):
            return 0

        tgt_texts, tgt_latex = separate_text_and_latex(targets)
        res_texts, res_latex = separate_text_and_latex(responses)

        # Compare the concatenated plain-text parts, commas removed.
        text_match = SimpleStrMatch.match(
            "".join(res_texts).replace(",", ""),
            "".join(tgt_texts).replace(",", ""),
        )

        # Compare the concatenated LaTeX parts symbolically.
        latex_match = LatexExprEquality.match(
            join_latex(res_latex), join_latex(tgt_latex)
        )

        return 1 if text_match and latex_match else 0
b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/program_judge.py new file mode 100644 index 0000000000000000000000000000000000000000..276be817a1558190d6c95cc0200ad837222bcba8 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/program_judge.py @@ -0,0 +1,141 @@ +import io +import pathlib +import json +import multiprocessing +from unittest.mock import patch +from multiprocessing.queues import Empty + +BIG_BENCH_PATH = pathlib.Path(__file__).resolve().parent.parent.parent + + +class ProgramJudge: + """Program Judging.""" + + # Check if results have been saved for this metric instance + # prevent duplicate saving results + task_saved = {} + + @classmethod + def save_test_results(cls, task_name, results, query_file): + query_base = pathlib.Path(query_file).parent + output_dir = query_base / "code_eval" + output_dir.mkdir(parents=True, exist_ok=True) + output_file = output_dir / f"{task_name}_test_case.json" + + saved = cls.task_saved.get(task_name, False) + if output_file.is_file() and saved: + with open(output_file, "r") as f: + existing_data = json.load(f) + existing_data.extend(results) + else: + existing_data = results + cls.task_saved[task_name] = True + with open(output_file, "w") as f: + json.dump(existing_data, f, indent=4) + + @staticmethod + def match(response: str, eval_context: str, task_info: str = None) -> int: + # Load all test cases from the benchmark_tasks directory + # task_name = task_info["task_name"] + # task_folder = task_info["task_folder"] + # query_results_file = task_info["results_file"] + + test_cases = eval_context["test_case"] + + # Create a CodeTester instance with the response and the found test cases + tester = CodeTester(response, test_cases) + score, results = tester.run_tests() + + # ProgramJudge.save_test_results(task_name, results, query_results_file) + return score + + +######################################################### +### Implementation of the automatic code tester 
class CodeTester:
    """Runs untrusted user code against stdin/stdout test cases.

    SECURITY NOTE: the submitted code is executed with ``exec`` in a child
    process with no sandboxing beyond a wall-clock timeout — only run this
    on code from trusted evaluation pipelines.
    """

    def __init__(self, user_code, test_cases, timeout=2, verbose=True):
        # user_code: the program source to exec; test_cases: dict or list of
        # {"input": [...lines...], "expected": str}; timeout: per-case seconds.
        self.user_code = user_code
        self.test_cases = test_cases
        self.timeout = timeout
        self.verbose = verbose

    def run_user_code(self, input_data):
        """Execute the user code in a subprocess, feeding *input_data* lines.

        Returns the captured stdout (trailing whitespace stripped) or an
        "ERROR: ..." string on timeout / missing output.
        """
        input_str = "\n".join(input_data) + "\n"
        output_queue = multiprocessing.Queue()
        # Run in a separate process so a hung submission can be terminated.
        process = multiprocessing.Process(
            target=self.target, args=(output_queue, input_str)
        )
        process.start()

        process.join(self.timeout)
        if process.is_alive():
            process.terminate()
            return f"ERROR: Code execution exceeded the time limit."

        try:
            result = output_queue.get(timeout=10)  # Add timeout for queue retrieval
        except Empty:
            return "ERROR: No output was produced before timeout."
        finally:
            output_queue.close()  # Close the queue to release resources
            output_queue.join_thread()  # Ensure all items in the queue are processed

        return result

    def target(self, output_queue, input_str):
        """Child-process entry: exec the user code with patched input/stdout."""
        contains_main_block = 'if __name__ == "__main__":' in self.user_code
        stdout = io.StringIO()
        try:
            # input() returns successive lines; stdout is captured in-memory.
            with patch("builtins.input", side_effect=input_str.splitlines()):
                with patch("sys.stdout", new=stdout):
                    if contains_main_block:
                        # If the user code contains the main block, execute in the context of __name__ == "__main__"
                        exec(self.user_code, {"__name__": "__main__"})
                    else:
                        # Otherwise, just execute the user code directly
                        exec(self.user_code)
        except Exception as e:
            output_queue.put(f"ERROR during execution: {e}")
        else:
            output_queue.put(stdout.getvalue().rstrip())

    def evaluate_test_case(self, input_data, expected_output):
        """Run one test case; return (passed: bool, actual_output: str)."""
        output = self.run_user_code(input_data)
        return output == expected_output.rstrip(), output

    def run_tests(self):
        """Run every test case.

        Returns:
            (score, results): pass fraction in [0, 1] and a per-case list of
            result dicts (response, input, output, expected, Passed/Failed).
        """
        # A single test case may be given as a bare dict; normalize to a list.
        if isinstance(self.test_cases, dict):
            self.test_cases = [self.test_cases]
        total_tests = len(self.test_cases)
        passed_tests = 0
        results = []

        for i, test_case in enumerate(self.test_cases, 1):
            result, output = self.evaluate_test_case(
                test_case["input"], test_case["expected"]
            )

            test_result = {
                "response": self.user_code,
                "test_case": test_case["input"],
                "output": output,
                "expected": test_case["expected"],
                "result": "Passed" if result else "Failed",
            }
            results.append(test_result)

            if result:
                if self.verbose:
                    print(f"Test case {i}: Passed")
                passed_tests += 1
            else:
                if self.verbose:
                    print(
                        f"Test case {i}: Failed - Expected {test_case['expected']} but got {output}"
                    )

        score = passed_tests / total_tests if total_tests > 0 else 0
        return score, results
+ """ + + @classmethod + def match(cls, responses, targets) -> int: + """Exact match between targets and responses.""" + if not isinstance(responses, str): + responses = str(responses) + responses = str_to_list(responses) + targets = str_to_list(targets) + + responses = [ + item.lower() if isinstance(item, str) else str(item) for item in responses + ] + targets = [item.lower() for item in targets] + return 1 if responses == targets else 0 + + +class SequenceAccuracyCaseInsensitive: + """Determines how much of the first part of the list + was predicted correctly. + """ + + @classmethod + def match(cls, responses, targets) -> int: + """Exact match between targets and responses.""" + responses = str_to_list(responses) + targets = str_to_list(targets) + if len(targets) != len(responses): + return 0 + correct = 0 + for res, tgt in zip(responses, targets): + if isinstance(tgt, str): + if res.lower() == tgt.lower(): + correct += 1 + elif isinstance(tgt, Number) and isinstance(res, Number): + if res == tgt: + correct += 1 + else: + pass + return correct / len(targets) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_equality.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_equality.py new file mode 100644 index 0000000000000000000000000000000000000000..01745bcf7cffe32c04573eae89edbbeba521033d --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/set_equality.py @@ -0,0 +1,74 @@ +from .common.conversions import cast_to_set, str_to_set + + +def _convert_to_hashable(item): + """将不可哈希的类型转换为可哈希类型""" + if isinstance(item, (list, tuple)): + return tuple(item) # 将列表转换为元组 + return item + + +class SetEquality: + """Determines whether two sets are equal.""" + + @classmethod + def match(cls, responses, targets) -> int: + """Exact match between targets and responses.""" + if isinstance(responses, (list, tuple)): + responses = {_convert_to_hashable(item) for item in responses} + if isinstance(targets, (list, tuple)): + 
targets = {_convert_to_hashable(item) for item in targets} + return 1 if responses == targets else 0 + + +class SetEqualityCaseInsensitive: + """Determines whether two sets are equal, ignoring string case.""" + + @classmethod + def match(cls, responses, targets) -> int: + """Exact match between targets and responses.""" + try: + responses: set[str] = {text.upper() for text in cast_to_set(responses)} + targets: set[str] = {text.upper() for text in cast_to_set(targets)} + except AttributeError: + return 0 + return 1 if responses == targets else 0 + + +class StringSetEqualityLineSplit: + """Determines whether two sets are equal, for string inputs, separated by line breaks""" + + @classmethod + def match(cls, responses, targets) -> int: + if "\\n" in targets: + targets = targets.replace("\\n", "\n") + if "\\n" in responses: + responses = responses.replace("\\n", "\n") + responses_set = set(responses.split("\n")) + targets_set = set(targets.split("\n")) + responses_set = { + item.lower() if isinstance(item, str) else item for item in responses_set + } + targets_set = { + item.lower() if isinstance(item, str) else item for item in targets_set + } + return 1 if responses_set == targets_set else 0 + + +class StringSetEqualityCommaSplit: + """Determines whether two sets are equal, for string inputs, separated by commas + Handles some corner cases that would fail the general SetEquality metric, like the string + with "None", which fails the eval. Also do case-insensitive eval. 
+ """ + + @classmethod + def match(cls, responses, targets) -> int: + responses_set = str_to_set(responses) + targets_set = str_to_set(targets) + responses_set = { + item.lower() if isinstance(item, str) else item for item in responses_set + } + targets_set = { + item.lower() if isinstance(item, str) else item for item in targets_set + } + return 1 if responses_set == targets_set else 0 diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/xml_norm_point_in_bbox.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/xml_norm_point_in_bbox.py new file mode 100644 index 0000000000000000000000000000000000000000..929a2dcf4923718f1baba48941905aa126246cc9 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/scoring/xml_norm_point_in_bbox.py @@ -0,0 +1,35 @@ +from .common.conversions import parse_point_2d_from_xml, str_to_bboxes + + +class XmlNormPointInBbox: + """Determines whether a point is located in a bounding box. + + Assumes that co-ordinates are normalized between 0 and 1 and that the 2D point is + of the form x, y + """ + + @classmethod + def match(cls, responses, eval_context) -> int: + """Determine if the point is in the bounding box + and return which bounding box was matched, if any.""" + bounding_box_has_match = { + bbox: False for bbox in eval_context["bounding_boxes"] + } + bounding_boxes = [ + str_to_bboxes(bbox_str)[0] for bbox_str in eval_context["bounding_boxes"] + ] + assert bounding_boxes + + if not isinstance(responses, (tuple | list)): + responses = parse_point_2d_from_xml(responses) + if not responses: + return 0, bounding_box_has_match + elif len(responses) != 2: + return 0, bounding_box_has_match + + x, y = responses + for min_x, min_y, max_x, max_y in bounding_boxes: + if min_x <= x <= max_x and min_y <= y <= max_y: + bounding_box_has_match[str((min_x, min_y, max_x, max_y))] = True + return 1, bounding_box_has_match + return 0, bounding_box_has_match diff --git 
a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/analysis_utils.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/analysis_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7ff444e3ea1af7136e7d1111ffaaf25527c012 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/analysis_utils.py @@ -0,0 +1,182 @@ +import json +import ast +from collections import defaultdict +from typing import List, Dict, Any + +_DATASET_CACHE = {} +_SCORING_FUNCTIONS_CACHE = {} + +def _load_hf(subset_name: str) -> List[Dict[str, Any]]: + """ + Load the HF dataset for the given subset name. + """ + if subset_name in _DATASET_CACHE: + return _DATASET_CACHE[subset_name] + + from datasets import load_dataset + dataset = load_dataset("TIGER-Lab/MEGA-Bench", subset_name)["test"] + task_dict = {} + for sample in dataset: + task_name = sample["task_name"] + if task_name not in task_dict: + task_dict[task_name] = [] + task_dict[task_name].append(sample) + + _DATASET_CACHE[subset_name] = task_dict + return task_dict + + +def _get_scoring_functions(): + if _SCORING_FUNCTIONS_CACHE: + return _SCORING_FUNCTIONS_CACHE + + core_data = _load_hf("core") + open_data = _load_hf("open") + + core_scoring_functions = {} + open_scoring_functions = {} + + for task_name, task_samples in core_data.items(): + core_scoring_functions[task_name] = ast.literal_eval( + task_samples[0]["metric_info"] + ) + + for task_name, task_samples in open_data.items(): + open_scoring_functions[task_name] = ast.literal_eval( + task_samples[0]["metric_info"] + ) + + _SCORING_FUNCTIONS_CACHE["core"] = core_scoring_functions + _SCORING_FUNCTIONS_CACHE["open"] = open_scoring_functions + + return _SCORING_FUNCTIONS_CACHE + + +def _determine_eval_style(task): + """ + Determine the evaluation style (rule or llm) for a task. 
+ """ + scoring_functions = _get_scoring_functions() + core_scoring_functions = scoring_functions["core"] + open_scoring_functions = scoring_functions["open"] + + task_name = task["task_name"] + if task_name in core_scoring_functions: + metric_info = core_scoring_functions[task_name] + elif task_name in open_scoring_functions: + metric_info = open_scoring_functions[task_name] + else: + raise ValueError(f"Task '{task_name}' not found in either core or open datasets") + + all_task_metrics = list(metric_info["field_score_function"].values()) + eval_type = ( + "rule" + if ( + "gpt_4o_as_judge" not in all_task_metrics + and "ascii_art_gpt4o_judge" not in all_task_metrics + ) + else "llm" + ) + return eval_type + + +def clear_cache(): + """ + Clear the cache and force re-loading the dataset. + """ + global _DATASET_CACHE, _SCORING_FUNCTIONS_CACHE + _DATASET_CACHE.clear() + _SCORING_FUNCTIONS_CACHE.clear() + + +def task_list_refine(task_list): + task_results = [] + for task in task_list: + if "mean_task_score" in task and task["mean_task_score"] != -1: + num_demo = 1 if len(task["example_contents"]) > 0 else 0 + task_results.append( + { + "name": task["task_name"], + "score": task["mean_task_score"], + "eval_type": task.get("eval_type", _determine_eval_style(task)), + "num_demo": num_demo, + "num_query": len(task["query_response"]), + } + ) + return task_results + + +def derive_keyword_stats(task_results_with_meta, include_per_task_info=False): + """ + Calculate keyword-based statistics for skills, input_format, and output_format. 
+ """ + skills_stats = defaultdict(lambda: {"count": 0, "total_score": 0.0, "num_samples": 0, "tasks": []}) + input_format_stats = defaultdict(lambda: {"count": 0, "total_score": 0.0, "num_samples": 0, "tasks": []}) + output_format_stats = defaultdict(lambda: {"count": 0, "total_score": 0.0, "num_samples": 0, "tasks": []}) + input_num_stats = defaultdict(lambda: {"count": 0, "total_score": 0.0, "num_samples": 0, "tasks": []}) + app_stats = defaultdict(lambda: {"count": 0, "total_score": 0.0, "num_samples": 0, "tasks": []}) + + for task_name, task in task_results_with_meta.items(): + task_name = task.get("original_task_name", "Unknown Task") + score = task.get("score", 0.0) + num_samples = task.get("num_query", 0) + task.get("num_demo", 0) + + if score == -1: + continue + + for skill in task.get("skills", []): + skills_stats[skill]["count"] += 1 + skills_stats[skill]["total_score"] += score + skills_stats[skill]["num_samples"] += num_samples + if include_per_task_info: + skills_stats[skill]["tasks"].append((task_name, score)) + + for stat_dict, key in [ + (input_format_stats, "input_format"), + (output_format_stats, "output_format"), + (input_num_stats, "num_input"), + (app_stats, "app") + ]: + if value := task.get(key): + stat_dict[value]["count"] += 1 + stat_dict[value]["total_score"] += score + stat_dict[value]["num_samples"] += num_samples + if include_per_task_info: + stat_dict[value]["tasks"].append((task_name, score)) + + all_stats = { + "skills": skills_stats, + "input_format": input_format_stats, + "output_format": output_format_stats, + "input_num": input_num_stats, + "app": app_stats, + } + + for stats_dict in all_stats.values(): + for keyword, data in stats_dict.items(): + data["average_score"] = data["total_score"] / data["count"] if data["count"] > 0 else 0.0 + del data["total_score"] + + return dict(all_stats) + + +def collect_task_metadata(model_results, all_task_meta_path): + """ + Collect task metadata for a model's results using the 
all_task_meta.json file + """ + # Load the complete task metadata + with open(all_task_meta_path, "r") as f: + all_meta = json.load(f) + + # Create result dictionary + all_task_meta = {} + + # Match results with metadata + for task_result in model_results: + task_name = task_result["name"] + if task_name in all_meta: + meta = all_meta[task_name].copy() # Create a copy to avoid modifying original + meta.update(task_result) + all_task_meta[task_name] = meta + + return all_task_meta diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/derive_breakdown_results.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/derive_breakdown_results.py new file mode 100644 index 0000000000000000000000000000000000000000..db62252a7068e2d47771e248a07bad8415ca0932 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/derive_breakdown_results.py @@ -0,0 +1,141 @@ +import json +import argparse +from pathlib import Path +from analysis_utils import ( + task_list_refine, + collect_task_metadata, + derive_keyword_stats, +) + +def calculate_model_summary(task_results_with_meta): + """ + Re-calculate model performance summary statistics across core and open tasks. 
+ + Args: + task_results: List of task results with scores + task_metadata: Dictionary containing task metadata including task types + + Returns: + Dictionary containing summary statistics for core and open tasks + """ + core_tasks = [] + open_tasks = [] + + # Separate core and open tasks + for task in task_results_with_meta.values(): + if task['eval_type'] == 'llm': + open_tasks.append(task) + else: + core_tasks.append(task) + + def calculate_stats(tasks): + if not tasks: + return None + + total_samples = sum(task.get('num_query', 0) for task in tasks) + macro_scores = [task.get('score', 0) for task in tasks] + + return { + "num_eval_tasks": len(tasks), + "num_eval_samples": total_samples, + "macro_mean_score": sum(macro_scores) / len(tasks) if tasks else 0, + } + + core_stats = calculate_stats(core_tasks) + open_stats = calculate_stats(open_tasks) + + # Calculate overall score (weighted average based on number of tasks) + total_tasks = (core_stats["num_eval_tasks"] + open_stats["num_eval_tasks"]) + overall_score = ( + (core_stats["macro_mean_score"] * core_stats["num_eval_tasks"] + + open_stats["macro_mean_score"] * open_stats["num_eval_tasks"]) / total_tasks + if core_stats and open_stats + else 0 + ) + + return { + "core": core_stats, + "open": open_stats, + "overall_score": overall_score + } + +def merge_json_files(input_dir, output_path, key="name"): + """ + Merge multiple JSON files containing evaluation results from a directory. + Looks for all files matching pattern 'data_with_scores*.json'. + Prioritizes LLM evaluations over rule-based ones when duplicates exist. 
+ """ + data_dict = {} # Using name as key for easy lookup and updates + + # Find all matching JSON files in the directory + json_paths = list(Path(input_dir).glob("megabench_score*.json")) + print(f"Found {len(json_paths)} files to merge") + + # Load and merge all JSON files + for path in json_paths: + print(f"Processing {path}") + with open(path, "r") as f: + data = json.load(f) + if isinstance(data, dict) and "data" in data: + data = task_list_refine(data["data"]) + + # Update or add entries + for item in data: + item_key = item[key] + # If new item or if new item is LLM-evaluated (prioritize LLM eval) + if item_key not in data_dict or ( + item.get("eval_type") == "llm" and data_dict[item_key].get("eval_type") != "llm" + ): + data_dict[item_key] = item + + # Convert back to list + merged_data = list(data_dict.values()) + + # Save the merged result + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w") as f: + json.dump(merged_data, f, indent=4) + + print(f"Merged file with {len(merged_data)} tasks saved to {output_path}") + return merged_data + +def main(): + # Parse command line arguments + parser = argparse.ArgumentParser(description='Merge and process evaluation score files.') + parser.add_argument('--input_dir', type=str, help='Directory containing score files') + args = parser.parse_args() + + # Convert path to Path object + input_dir = Path(args.input_dir) + + # Create analysis directory under input directory + output_dir = input_dir / "analysis" + output_dir.mkdir(parents=True, exist_ok=True) + + # Merge files + output_path = output_dir / "task_results.json" + task_results = merge_json_files(input_dir, output_path) + + # Collect metadata and derive keyword stats + task_results_with_meta = collect_task_metadata(task_results, all_task_meta_path="all_task_meta.json") + keyword_stats = derive_keyword_stats(task_results_with_meta) + + # Calculate model summary + model_summary = calculate_model_summary(task_results_with_meta) + + 
summary_results = { + "model_summary": model_summary, + "keyword_stats": keyword_stats + } + + # Save keyword stats + stats_output = output_dir / "summary_and_keyword_stats.json" + with open(stats_output, "w") as f: + json.dump(summary_results, f, indent=4) + + print(f"\nResults saved in {output_dir}:") + print(f"- Merged data: {output_path}") + print(f"- Multi-dimensional keywords stats: {stats_output}") + +if __name__ == "__main__": + main() diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/get_si_subset_from_full.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/get_si_subset_from_full.py new file mode 100644 index 0000000000000000000000000000000000000000..39ce3e55d53e9cc285347b0ee08a783bb3960880 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/megabench/tools/get_si_subset_from_full.py @@ -0,0 +1,92 @@ +""" +For propietary models that naturally suport multi-image or video inputs, we don't run the single-image setting, +instead, we directly compute the SI results by extracting the subset results from the full task set to compute the stats. 
+""" + + +from pathlib import Path +import json +import argparse +from analysis_utils import ( + derive_keyword_stats, + collect_task_metadata +) +from derive_breakdown_results import calculate_model_summary + + +def process_subset_results(input_dir, eval_type): + """Read results from the full results directory structure""" + task_results_path = input_dir / "analysis" / "task_results.json" + + # Load task results + with open(task_results_path, "r") as f: + task_results = json.load(f) + + results_with_meta = collect_task_metadata(task_results, all_task_meta_path="all_task_meta.json") + + # Filter tasks by eval_type + filtered_results_with_meta = {task_name:task for task_name, task in results_with_meta.items() if task["eval_type"] == eval_type and task["num_input"] == "1-image"} + filtered_results = [task for task in task_results if task["name"] in filtered_results_with_meta] + + if not filtered_results: + print(f"Warning: No tasks found in {input_dir} with eval_type {eval_type}") + return None, None, None + + # Calculate summary statistics + num_tasks = len(filtered_results) + total_queries = sum(task["num_query"] for task in filtered_results) + total_correct = sum(round(task["score"] * task["num_query"]) for task in filtered_results) + + summary = { + "num_eval_tasks": num_tasks, + "num_eval_samples": total_queries, + "macro_mean_score": sum(task["score"] for task in filtered_results) / num_tasks, + } + + return filtered_results, filtered_results_with_meta, summary + + +def main(input_dir, output_dir): + # Process core and open set results + filtered_tasks_core, filtered_tasks_core_with_meta, _ = process_subset_results(input_dir, "rule") + filtered_tasks_open, filtered_tasks_open_with_meta, _ = process_subset_results(input_dir, "llm") + + if filtered_tasks_core and filtered_tasks_open: + task_results = filtered_tasks_core + filtered_tasks_open + task_results_with_meta = {**filtered_tasks_core_with_meta, **filtered_tasks_open_with_meta} + + # Save task results + 
with open(output_dir / "task_results.json", "w") as f: + json.dump(task_results, f, indent=4) + + # Collect metadata and derive keyword stats + keyword_stats = derive_keyword_stats(task_results_with_meta) + + # Calculate model summary + model_summary = calculate_model_summary(task_results_with_meta) + + summary_results = { + "model_summary": model_summary, + "keyword_stats": keyword_stats + } + + # Save keyword stats + stats_output = output_dir / "summary_and_keyword_stats.json" + with open(stats_output, "w") as f: + json.dump(summary_results, f, indent=4) + + print(f"\nResults saved in {output_dir}") + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_dir", type=str, required=True, help="Path to the input directory containing full results") + parser.add_argument("--output_dir", type=str, required=True, help="Path to the output directory") + args = parser.parse_args() + + input_dir = Path(args.input_dir) + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + main(input_dir, output_dir) \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmdu.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmdu.py new file mode 100644 index 0000000000000000000000000000000000000000..d432bdbb99f6dedec4691b4b1095410d41d82827 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmdu.py @@ -0,0 +1,126 @@ +from ...smp import * + +meta_prompt = """ +You are an assistant skilled at evaluating the quality of creative text. +Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to \ +the user question displayed below. You'll need to assess the response on the following dimensions: \ +Creativity, Richness, Visual Perception, Logical Coherence, Answer Accuracy and Image Relationship Understanding. \ +We will provide you with a creative question and the AI model's response and a reference answer for your evaluation. 
\ +As you begin your assessment, follow this process: +1. Evaluate the AI model's answers on different dimensions, pointing out its strengths or weaknesses \ +in each dimension and assigning a score of 1 to 10 for each. +2. Finally, based on the assessments across dimensions, \ +provide an overall score of 1 to 10 for the AI model's response. +3. Your scoring should be as stringent as possible and follow the scoring rules below: +In general, the higher the quality of the model's response and its strict adherence to user needs, \ +the higher the score. Responses that do not meet user needs will receive lower scores. +Scoring rules: +Creativity: +Scores 1-2 when there is no innovation or uniqueness in the content. +Scores 3-4 when providing partially original content but with low creative quality. +Scores 5-6 when mostly creative but lacks significant novelty, with moderate quality. +Scores 7-8 when having novelty and high-quality content. +Scores 9-10 when highly novel and of exceptional quality compared to the reference answer. +Richness: +Scores 1-2 when lacking depth and breadth, with very limited information. +Scores 3-4 when limited in depth and breadth, with fewer explanations and examples, showing low diversity. +Scores 5-6 when limited in depth and breadth but provides basic necessary information. +Scores 7-8 when providing depth and useful additional information. +Scores 9-10 when providing exceptional depth, breadth, and high diversity compared to the reference answer. +Visual Perception: +Scores 1-2 when the description of the visual information in the image contains errors or \ +is significantly inconsistent with the content of the image. +Scores 3-4 When the description of the visual information in the image reflects only a small amount \ +of the image's information and contains some errors. +Scores 5-6 when the description of the visual information in the image includes the basic information \ +of the image but contains minimal information. 
+Scores 7-8 when the description of the visual information in the image matches the image well and is rich in content, \ +providing a substantial amount of information about the image. +Scores 9-10 when the description of the visual information in the image not only matches the image \ +but also is more detailed and informative compared to the reference answer, providing more information about the image. +Logical Coherence: +Scores 1-2 when entirely incoherent, lacking any logic, and not matching the question or known information. +Scores 3-4 when somewhat coherent but with many logical errors or inconsistencies. +Scores 5-6 when mostly coherent, with few errors, but may struggle to maintain complete coherence in complex situations. +Scores 7-8 when excellent logical handling, very few errors. +Scores 9-10 when flawless logic, impeccable in handling complexity, \ +and significantly higher logical coherence compared to the reference answer. +Answer Accuracy: +Scores 1-2 when the answer is significantly inconsistent with the question or contains obvious errors. +Scores 3-4 when the answer is partially correct but contains some errors or is incomplete. +Scores 5-6 when the answer is basically correct but lacks details or is not sufficiently detailed. +Scores 7-8 when the answer is accurate and detailed, fully corresponding to the question. +Scores 9-10 when the answer is not only accurate and detailed but also provides additional useful information, \ +exceeding expectations. +Image Relationship Understanding: +Scores 1-2 when there are significant errors or confusion in distinguishing and describing different images, \ +unable to correctly identify and relate the content of the images. +Scores 3-4 when the description of different images reflects only minimal distinguishing information, \ +contains some errors and confusion, and fails to clearly differentiate and relate the images. 
+Scores 5-6 when the description of different images includes basic distinguishing information, \ +is able to correctly identify and relate the images in a basic manner, \ +but the information provided is minimal and lacks detail. +Scores 7-8 when the description of different images is accurate and detailed, \ +clearly distinguishing and relating the images, \ +with rich content that points out the main commonalities and differences between the images. +Scores 9-10 when the description of different images is not only accurate and detailed but also \ +provides richer information and analysis, clearly distinguishing and relating the images, \ +more comprehensively pointing out the commonalities and differences \ +between the images compared to the reference answer. +Overall Score: +Scores 1-2 when irrelevant to the question, factually incorrect, or generates harmful content. +Scores 3-4 when no serious errors, mostly harmless, but of low quality and does not meet requirements. +Scores 5-6 when basically meeting requirements but performing poorly in some dimensions, with moderate quality. +Scores 7-8 when performing well in all dimensions. +Scores 9-10 when fully addressing user questions and all requirements, significantly surpassing the reference answer. +Please remember, you must evaluate and explain before scoring. After your explanation for each dimension, \ +add the score for that dimension. 
Finally, at the end of your response, \ +in the format of the dictionary (including brackets), return all your scoring results, \ +ensuring your scores are integers: +{'Dimension One': Score, 'Dimension Two': Score, ..., 'Overall Score': Score}, \ +for example: {'Creativity': 9, 'Richness': 6, ..., 'Overall Score': 7}.\n +""" +question_begin_prompt = '[Question]' +reference_begin_prompt = '[The Start of Reference Answer]' +reference_end_prompt = '[The End of Reference Answer]' +answers_begin_prompt = '[The Start of Assistant’s Answer]' +answers_end_prompt = '[The End of Assistant’s Answer]' + + +def mmdu_score(model, line): + question = eval(line['question']) + gt = eval(line['answer']) + prediction = eval(line['prediction']) + + DIMS = [ + 'Creativity', 'Richness', 'Visual Perception', 'Logical Coherence', + 'Answer Accuracy', 'Image Relationship Understanding', 'Overall Score' + ] + + all_result_dict = [] + logs = [] + for j in range(len(question)): + try: + prompt = meta_prompt + question_begin_prompt + '\n' + question[j] + '\n\n' + \ + reference_begin_prompt + '\n' + gt[j] + '\n' + reference_end_prompt + '\n\n' + \ + answers_begin_prompt + '\n' + prediction[j] + '\n' + answers_end_prompt + response = model.generate(prompt) + start_index = response.find('{') + end_index = response.rfind('}') + 1 + dictionary_str = response[start_index: end_index] + result_dict = eval(dictionary_str) + all_result_dict.append(result_dict) + if all([x in result_dict for x in DIMS]): + logs.append('Succeed') + else: + logs.append( + f'Following Dims are not in results of turn {j}: ' + f'{",".join([x for x in DIMS if x not in result_dict])}' + ) + except Exception as e: + logging.warning(str(e)) + all_result_dict.append({d: None for d in DIMS}) + logs.append(str(e)) + + df = pd.DataFrame(all_result_dict) + return dict(res=df, log='\n'.join(logs)) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci.py new file mode 100644 index 
0000000000000000000000000000000000000000..329c3db4bf1a2b9a37ae985750b947358ce6726f --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci.py @@ -0,0 +1,434 @@ +# flake8: noqa +import evaluate +from tqdm import tqdm +import numpy as np +import os.path as osp +import pandas as pd +from vlmeval import load, dump, track_progress_rich + +g_eval_prompt = """ +You will be given a oracle caption that describes an image. You will then be given a second caption written for the same image. + +Your task is to rate the second caption on one metric. + +Evaluation Criteria: + +Relevance (1-5) - The extent to which the second caption is relevant to the key elements and context described in the oracle caption. A relevant caption should focus on the same subjects, objects, actions, or context highlighted in the oracle caption, without introducing unrelated or extraneous details. + +Evaluation Steps: + +1. Review the Oracle Caption: Carefully read the oracle caption to understand the main elements and context it describes. +2. Review the Second Caption: Assess whether the second caption focuses on the same key elements and context as the oracle caption. Evaluate if the second caption stays on topic and does not introduce irrelevant details. +3. Assign a Score for Relevance: Based on the Evaluation Criteria, rate how relevant the second caption is to the oracle caption's description of the same image. +""" + +generate_atomic_facts_sys_prompt = """ +You will be given a caption for a figure containing multiple panels, which includes descriptions for the entire figure as well as each individual panel. Your task is to break down the caption into separate, independent descriptions for the entire figure and each panel, formatted appropriately and separated by '-' +""" + +generate_atomic_facts_usr_prompt_one_shot = """ +The figure consists of four sub-figures labeled a, b, c, and d. 
All four images appear to be scanning electron microscope (SEM) images showing the microstructure of different materials, likely related to the iron-based cathode catalysts described in the article.\n\na. This image shows a highly porous structure with interconnected particles forming a network. The particles appear to be in the nanometer to micrometer size range. The scale bar indicates 1 \u03bcm.\n\nb. This image displays a closer view of what seems to be a similar material to (a), but at a higher magnification. The individual particles are more clearly visible, showing a rough, granular texture. The scale bar indicates 200 nm.\n\nc. This image reveals a different morphology compared to (a) and (b). It shows larger, more consolidated structures with a rougher surface texture. There are still visible pores and gaps between the structures. The scale bar indicates 1 \u03bcm.\n\nd. This final image appears to be a cross-sectional view of a porous material, possibly showing the internal structure of the catalyst. It reveals a highly interconnected network of pores and channels throughout the material. The scale bar indicates 200 nm.\n\nThese images likely represent different stages or variations of the iron-acetate/phenanthroline/zeolitic-imidazolate-framework-derived electrocatalyst mentioned in the article. The varying structures and porosities shown in these images could be related to the enhanced mass-transport properties and increased volumetric activity described in the text. +Model_Response: +- The figure consists of four sub-figures labeled a, b, c, and d. +- All four images appear to be scanning electron microscope (SEM) images. +- The images show the microstructure of different materials. +- The materials are likely related to the iron-based cathode catalysts described in the article. +- Image a shows a highly porous structure with interconnected particles forming a network. +- The particles in image a are in the nanometer to micrometer size range. 
+- The scale bar in image a indicates 1 μm. +- Image b displays a closer view of a material similar to that in image a but at higher magnification. +- The individual particles in image b are more clearly visible and show a rough, granular texture. +- The scale bar in image b indicates 200 nm. +- Image c shows larger, more consolidated structures with a rougher surface texture. +- There are visible pores and gaps between the structures in image c. +- The scale bar in image c indicates 1 μm. +- Image d appears to be a cross-sectional view of a porous material. +- Image d reveals the internal structure of the catalyst with a highly interconnected network of pores and channels. +- The scale bar in image d indicates 200 nm. +- These images likely represent different stages or variations of the iron-acetate/phenanthroline/zeolitic-imidazolate-framework-derived electrocatalyst mentioned in the article. +- The varying structures and porosities shown in these images could be related to the enhanced mass-transport properties described in the text. +- The varying structures and porosities in the images may contribute to increased volumetric activity described in the article. 
+""" + +from pycocoevalcap.cider.cider import Cider +cider_scorer = Cider() + + +def compute_cider(predictions, references): + """ + predictions: list of strings + references: list of list of strings + """ + return cider_scorer.compute_score([{i: [ref for ref in refs]} for i, refs in enumerate(references)], + {i: [pred] for i, pred in enumerate(predictions)})[0] + + +def get_all_metrics_for_reference_based_metrics( + references, candidates, image_id_list, reference_based_metrics_file, +): + # Initialize the metrics + existing_data = load(reference_based_metrics_file) if osp.exists(reference_based_metrics_file) else {} + bleu_metric = evaluate.load("bleu") + rouge_metric = evaluate.load("rouge") + meteor_metric = evaluate.load("meteor") + bertscore_metric = evaluate.load("bertscore") + # cider_metric = evaluate.load("cider") + + # bleu_scores = [[], [], [], [], []] # B1, B2, B3, B4, BLEU + # rouge_scores = [[], [], [], []] # ROUGE1, ROUGE2, ROUGEL, ROUGELSUM + # meteor_scores = [] + # bertscore_scores = [] + + # Calculate scores for each sample + idx = 1 + print(f"Calculating metrics for {len(references)} samples") + assert len(references) == len(candidates) == len(image_id_list) + for ref, cand, image_id in tqdm(zip(references, candidates, image_id_list)): + if not cand.strip(): + print(cand) + continue + + default_bleu_score = {"bleu": 0.0, "precisions": [0.0, 0.0, 0.0, 0.0]} + if image_id not in existing_data: + existing_data[image_id] = {} + bleu_score = existing_data.get(image_id, {}).get('bleu_score', default_bleu_score) + + if bleu_score == default_bleu_score: + try: + bleu_score = bleu_metric.compute(predictions=[cand], references=ref) + except: + bleu_score = default_bleu_score + existing_data[image_id]['bleu_score'] = bleu_score + + default_rouge_score = { + "rouge1": 0.0, + "rouge2": 0.0, + "rougeL": 0.0, + "rougeLsum": 0.0, + } + rouge_score = existing_data.get(image_id, {}).get('rouge_score', default_rouge_score) + if rouge_score == default_rouge_score: 
+ try: + rouge_score = rouge_metric.compute(predictions=[cand], references=ref) + except: + rouge_score = default_rouge_score + existing_data[image_id]['rouge_score'] = rouge_score + + # meteor score + default_meteor_score = 0.0 + meteor_score = existing_data.get(image_id, {}).get('meteor_score', default_meteor_score) + if meteor_score == default_meteor_score: + try: + meteor_score = meteor_metric.compute(predictions=[cand], references=ref)[ + "meteor" + ] + except: + meteor_score = default_meteor_score + existing_data[image_id]['meteor_score'] = meteor_score + + # bertscore + default_bertscore_score = {"f1": [0.0]} + bertscore_score = existing_data.get(image_id, {}).get('bertscore_score', default_bertscore_score) + if bertscore_score == default_bertscore_score: + try: + bertscore_score = bertscore_metric.compute( + predictions=[cand], references=ref, lang="en" + ) + except: + bertscore_score = default_bertscore_score + existing_data[image_id]['bertscore_score'] = bertscore_score + + # cider score + default_cider_score = 0.0 + cider_score = existing_data.get(image_id, {}).get('cider_score', default_cider_score) + if cider_score == default_cider_score: + try: + # cider_score = cider_metric.compute(predictions=[cand], references=ref)["score"] + cider_score = compute_cider([cand], [ref]) + except: + cider_score = default_cider_score + existing_data[image_id]['cider_score'] = cider_score + + if idx % 50 == 0: + print(f"Saving 50 samples to {reference_based_metrics_file}") + dump(existing_data, reference_based_metrics_file) + + idx += 1 + + dump(existing_data, reference_based_metrics_file) + print(f"Saved all samples to {reference_based_metrics_file}") + + return existing_data + + +def llm_openai_judge(prediction, reference, dimension, prompt, model): + + if isinstance(reference, list): + reference = reference[0] + assert isinstance(prediction, str) + + if dimension == "fluency": + sys_prompt = prompt.format(Second=prediction) # no reference + usr_prompt = 
f"Caption:\n{prediction}\n" + else: + sys_prompt = prompt.format(Target=reference, Second=prediction) + usr_prompt = f"Oracle Caption:\n{reference}\n\nSecond Caption:\n{prediction}\n\n" + usr_prompt += f"What is the {dimension} score (1-5)? Return the score ONLY!" + + model.system_prompt = sys_prompt + + answer = 0 + all_responses = model.generate(usr_prompt) + + for response in all_responses.choices: + response = response.message.content.strip() + try: + answer = int(response) + break + except: + for s in ["1", "2", "3", "4", "5"]: + if s in response: + answer = int(s) + break + return answer + + +def g_eval_generate(evaluator, usr_prompt): + return evaluator.generate(usr_prompt) + + +def get_all_metrics_for_g_eval_score(references, candidates, evaluator, tmp_file, nproc=4): + # relevance scores + assert len(references) == len(candidates) + tups = [] + indices = range(len(references)) + for reference, prediction in tqdm(zip(references, candidates)): + dimension = "relevance" + if isinstance(reference, list): + reference = reference[0] + assert isinstance(prediction, str) + + sys_prompt = g_eval_prompt.format(Target=reference, Second=prediction) + usr_prompt = f"Oracle Caption:\n{reference}\n\nSecond Caption:\n{prediction}\n\n" + usr_prompt += f"What is the {dimension} score (1-5)? Return the score ONLY!" 
+ + evaluator.system_prompt = sys_prompt + tups.append((evaluator, usr_prompt)) + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + ans = {k: v for k, v in ans.items() if evaluator.fail_msg not in str(v)} + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + _ = track_progress_rich( + g_eval_generate, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = load(tmp_file) + return ans + + +def merge_rating(refer_based_metrics_output_file_name, g_eval_metrics_output_file_name, fact_score_metrics_output_file): + # Update metrics columns for each row + refer_based_metrics_output_file = load(refer_based_metrics_output_file_name) + g_eval_metrics_output_file = load(g_eval_metrics_output_file_name) + # fact_score_metrics_output_file = load(fact_score_metrics_output_file) + for idx, item in refer_based_metrics_output_file.iterrows(): + ref_based_metrics = eval(item['reference_based_metrics']) + refer_based_metrics_output_file.at[idx, 'B2'] = ref_based_metrics['bleu_score']['precisions'][1] * 100 + refer_based_metrics_output_file.at[idx, 'RL'] = ref_based_metrics['rouge_score']['rougeL'] * 100 + refer_based_metrics_output_file.at[idx, 'M'] = ref_based_metrics['meteor_score'] * 100 + refer_based_metrics_output_file.at[idx, 'CD'] = ref_based_metrics['cider_score'] * 100 + refer_based_metrics_output_file.at[idx, 'BS'] = ref_based_metrics['bertscore_score']['f1'][0] * 100 + refer_based_metrics_output_file.at[idx, 'GE'] = g_eval_metrics_output_file.iloc[idx]['g_eval_metrics'] + # refer_based_metrics_output_file.at[idx, 'FS'] = eval(fact_score_metrics_output_file.iloc[idx]['fact_score_metrics'])['score'] + + # df = refer_based_metrics_output_file + # # Filter rows for the two settings + # df_filtered = df[df['setting'].isin(['w/o.Abstract_w/o.Content', 'w.Abstract_w/o.Content'])] + # # metrics = ['B2', 'RL', 'M', 'BS', 'CD', 'GE'] + # metrics = ['B2', 'RL', 
'M', 'BS', 'GE'] + + # # Group by subject and setting, averaging metrics + # grouped = df_filtered.groupby(['subject', 'setting'])[metrics].mean() + + # # Pivot to get multi-level columns: top level is setting, sub-level is metric + # result_df = grouped.unstack(level='setting') + # # Swap levels so that top level is setting + # result_df.columns = result_df.columns.swaplevel(0, 1) + # # Ensure columns order matches required settings + # result_df = result_df.reindex(columns=['w/o.Abstract_w/o.Content', 'w.Abstract_w/o.Content'], level=0) + + # # Reset index to have subject as a column + # result_df = result_df.reset_index() + # return result_df + + df = refer_based_metrics_output_file + + # metrics = ['B2', 'RL', 'M', 'BS', 'CD', 'FS', 'GE'] + metrics = ['B2', 'RL', 'M', 'BS', 'CD', 'GE'] + + subject_df = df.groupby('subject')[metrics].mean().reset_index() + + category_df = df.groupby('category')[metrics].mean().reset_index() + category_df.rename(columns={'category': 'subject'}, inplace=True) + category_df['subject'] = 'CATEGORY_' + category_df['subject'] + + overall_row = df[metrics].mean().to_frame().T + overall_row.insert(0, 'subject', 'Overall') + + result_df = pd.concat([subject_df, category_df, overall_row], ignore_index=True) + + return result_df + + +def dump_multiindex_excel(df, path): + from openpyxl import Workbook + from openpyxl.styles import Alignment + from openpyxl.utils.dataframe import dataframe_to_rows + + wb = Workbook() + ws = wb.active + ws.title = "Sheet1" + + ws.cell(row=1, column=1, value="subject") + col_levels = df.columns.levels + col_codes = df.columns.codes + level0_labels = [col_levels[0][i] for i in col_codes[0]] + level1_labels = [col_levels[1][i] for i in col_codes[1]] + unique_level0 = list(dict.fromkeys(level0_labels)) + + col_offset = 2 + start = col_offset + for lvl0 in unique_level0: + count = level0_labels.count(lvl0) + end = start + count - 1 + ws.merge_cells(start_row=1, start_column=start, end_row=1, end_column=end) + 
ws.cell(row=1, column=start, value=lvl0) + start = end + 1 + + ws.cell(row=2, column=1, value="subject") + for idx, label in enumerate(level1_labels): + ws.cell(row=2, column=col_offset + idx, value=label) + + for r_idx, row in enumerate(df.itertuples(index=False), start=3): + ws.cell(row=r_idx, column=1, value=row[0]) # subject + for c_idx, val in enumerate(row[1:], start=2): + ws.cell(row=r_idx, column=c_idx, value=val) + + for row in ws.iter_rows(): + for cell in row: + cell.alignment = Alignment(horizontal='center', vertical='center') + + wb.save(path) + + +def extract_int(s, scales=[0, 5]): + # Regular expression to find float numbers between 0 and 1, including 0 and 1 + for score in range(scales[0], scales[1] + 1): + if str(score) in s: + return int(score) + return None + + +def get_score( + model, + content, + gamma=10, + scales=[0, 5], + atomic_facts=None, +): + + total_decisions = [] + scores = [] + init_scores = [] + + decisions = [] + for atom in atomic_facts: + if not atom: + continue + atom = atom.strip() + + definition = f"How relevant is the generated caption to the given human-written caption for the figure? Determine whether the information in the generated caption is included or mentioned in the human-written caption. Respond with a score between {scales[0]} and {scales[1]}." + + prompt = "Human-written caption: {}\n\nGenerated caption: {}\n\nHow relevant is the generated caption to the given human-written caption? 
Respond with ONLY a score between {} and {}.".format( + content, atom, scales[0], scales[1] + ) + + model.system_prompt = definition + outputs = model.generate(prompt) + + generated_answer = outputs.lower().strip() + + generated_score = extract_int(generated_answer, scales=scales) + + if generated_score is None: + generated_score = 0.0 + else: + try: + # normalize the score to 0-1 + generated_score = float(generated_score) / scales[1] + except Exception as e: + print("Error:", e) + generated_score = 0.0 + + print("Atom score:", generated_score) + + decisions.append({"atom": atom, "is_supported": generated_score}) + + score = np.mean([d["is_supported"] for d in decisions]) + if gamma: + init_scores.append(score) + penalty = ( + 1.0 if len(atomic_facts) > gamma else np.exp(1 - gamma / len(atomic_facts)) + ) + score = penalty * score + + total_decisions.append(decisions) + scores.append(score) + print("Final score:", score) + + out = { + "score": np.mean(scores), + "decisions": total_decisions, + "num_facts_per_response": np.mean( + [len(d) for d in total_decisions if d is not None] + ), + } + + if gamma: + out["init_score"] = np.mean(init_scores) + + return out + + +def fact_score_generate(model, line): + generated_caption = line['prediction'] + model.system_prompt = generate_atomic_facts_sys_prompt + mes = [ + dict(type='text', value=generate_atomic_facts_usr_prompt_one_shot), + dict(type='text', value=generated_caption + '\nModel_Response:'), + ] + generated_facts = model.generate(mes).strip() + formatted_facts = generated_facts.split("-") + # topic = line["subject"] + # abstract = line["abstract"] + + # ground-truth caption + content = line["caption"] + + score_out = get_score( + model, + content, + gamma=10, + scales=[0, 5], + atomic_facts=formatted_facts, + ) + return score_out diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci4eval_req.txt b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci4eval_req.txt new file mode 100644 index 
0000000000000000000000000000000000000000..9920efa1ab19d54f882b31890eba2d204f7afa97
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mmsci4eval_req.txt
@@ -0,0 +1,6 @@
evaluate
pycocoevalcap
bert_score
rouge_score
nltk
absl-py
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/olympiadbench.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/olympiadbench.py
new file mode 100644
index 0000000000000000000000000000000000000000..d11f8180f01489e72f106c12272c1a77995e7fb7
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/olympiadbench.py
@@ -0,0 +1,703 @@
import re
import json
from math import isclose
from decimal import Decimal, getcontext
from fractions import Fraction
import sys
import math
import timeout_decorator
import logging
from ...smp import *

try:
    import sympy as sp
    from sympy import simplify, Eq, sympify, evalf, Pow
    from sympy.parsing.latex import parse_latex
    import antlr4
except ImportError:
    logging.warning('sympy or antlr4 is not installed, please install it for OlympiadBench evaluation.')

# Sentinel string returned by the judge-API wrapper when a request fails.
FAIL_MSG = 'Failed to obtain answer via API.'


def get_gpt4_extract_ICE():
    """Return the in-context examples for the GPT-4 answer-extraction prompt."""
    example_1 = """
1.
Model response: 'Rounded to two decimal places, the perimeter of the sector is approximately:\n\n(-2, 1)'
Extracted Answer: (-2, 1)
""" # noqa

    example_2 = """
2.
Model response: 'at those points.\n\nTherefore, the correct option that represents the meaning of the intersection points of the graphs is:\n\nD. They give the solutions to the equation $f(t)=g(t)$.",'
Extracted Answer: D
""" # noqa

    example_3 = """
3.
Model response: ' at 1 (there's a closed circle at y = 1), the range in interval notation is \\((-4, 1]\\).\n\nFinal values:\nDomain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)'
Extracted Answer: Domain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)
""" # noqa

    example_4 = """
4.
Model response: 'As it stands, I cannot provide the correct option letter because there isn't enough information to solve for 'y'.'
Extracted Answer: null
""" # noqa

    example_5 = """
5.
Model response: 'Given that AB = 17.6 meters, we can now substitute into the equation:\n\nd = 17.6 / cos(38\u00b0)\n\nTherefore, to one decimal place, the distance d between Ned and Bart is approximately 22.3 meters.'
Extracted answer: 22.3
""" # noqa

    example_6 = """
6.
Model response: have all the coefficients for the quadratic function:\n\\( f(x) = ax^2 + bx + c \\)\n\\( f(x) = -1x^2 - 2x + 1 \\)\n\nTherefore, the equation for the graphed function \\( f \\) is:\n\\( f(x) = -x^2 - 2x + 1 \\)"'
Extracted answer: f(x) = -x^2 - 2x + 1
""" # noqa

    return [example_1, example_2, example_3, example_4, example_5, example_6]


def get_gpt4_score_ICE():
    """Return the in-context examples for the GPT-4 consistency-judging prompt."""
    example_1 = """
[Question]: Write the set of numbers represented on the number line in interval notation.
[Standard Answer]: (-2,1]
[Model_answer] : Extracted Answer: \\((-2, 1)\\)
Judgement: 0
""" # noqa

    example_2 = """
[Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
[Standard Answer]: C
[Model_answer] : B:2\u221a{{3}}
Judgement: 0
""" # noqa

    example_3 = """
[Question]: Find the domain and range of the function f using interval notation.
[Standard Answer]: domain: [-4, 0) and range: (-3, 1]
[Model_answer] : Range: \\((-4, 1]\\)
Judgement: 0
""" # noqa

    example_4 = """
[Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
[Standard Answer]: C
[Model_answer] : null
Judgement: 0
""" # noqa

    return [example_1, example_2, example_3, example_4]


def build_olympiad_gpt4_extract_prompt(line):
    """Build the full few-shot prompt asking GPT-4 to extract the final answer
    from ``line['prediction']``."""
    task_description = """
I am providing you a response from a model to a math problem, termed 'Model Response'. You should extract the answer from the response as 'Extracted Answer'. Directly output the extracted answer with no explanation.\n\n
""" # noqa
    prediction = str(line['prediction'])
    demo_prompt = task_description
    examples = get_gpt4_extract_ICE()
    for example in examples:
        demo_prompt += example + '\n\n'
    test_prompt = f"Model response: '{prediction}'\nExtracted Answer: "
    # The query is numbered "7." so it follows the six in-context examples.
    full_prompt = f'{demo_prompt}7.\n{test_prompt}'

    return full_prompt


def build_olympiad_gpt4_score_prompt(line):
    """Build the few-shot prompt asking GPT-4 to judge whether the extracted
    answer is consistent with the ground truth (returns 0/1)."""
    task_description = """
Below are two answers to a math or a physics question. Question is [Question], [Standard Answer] is the standard answer to the question, and [Model_answer] is the answer extracted from a model's output to this question. Determine whether these two answers are consistent.
Please note that only when the [Model_answer] completely matches the [Standard Answer] means they are consistent. For non-multiple-choice questions, if the meaning is expressed in the same way, it is also considered consistent, for example, 0.5m and 50cm.
If they are consistent, Judement is 1; if they are different, Judement is 0.\n\n
""" # noqa
    question_for_eval = line['question']
    extract = line['extract']
    answer = line['final_answer']
    demo_prompt = task_description
    examples = get_gpt4_score_ICE()
    for example in examples:
        demo_prompt += example + '\n\n'
    test_prompt = f"""
    [Question]: {question_for_eval}
    [Standard Answer]: {answer}
    [Model_answer] : {extract}
    Judgement:"""
    full_prompt = f'{demo_prompt}{test_prompt}'

    return full_prompt


def post_check_score(line, prefetch=False):
    """Exact string match between extracted answer and ground truth.

    With ``prefetch=True`` the matching response is returned (truthy), so the
    caller can short-circuit the GPT judge; otherwise returns True/False.
    """
    ans = str(line['final_answer']).strip()
    response = str(line['extract']).strip()

    if response == ans:
        return response if prefetch else True
    else:
        return False


def Olympiad_auxeval_extract(model, line):
    """Ask the judge model to extract the final answer, retrying up to 5 times
    with increasing temperature.  Returns a dict with the attempt log and the
    extracted string ('' when all retries fail)."""
    prompt = build_olympiad_gpt4_extract_prompt(line)
    log = ''
    retry = 5
    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)

        if FAIL_MSG in res:
            log += f'Try {i}: output is {prediction}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log_extract=log, extract=res)
    log += 'All 5 retries failed.\n'
    return dict(log_score=log, extract='') if False else dict(log_extract=log, extract='')


def Olympiad_auxeval_score(model, line):
    """Judge extracted vs. ground-truth answer with the model, retrying up to
    5 times; an exact string match short-circuits the LLM call entirely."""
    prompt = build_olympiad_gpt4_score_prompt(line)
    log = ''
    retry = 5
    if post_check_score(line, prefetch=True):
        res = post_check_score(line, prefetch=True)
        return dict(log_score='Prefetch succeed', score=True)
    for i in range(retry):
        prediction = line['prediction']
        res = model.generate(prompt, temperature=i * 0.5)

        # Only a bare '0' or '1' reply counts as a parseable judgement.
        if FAIL_MSG in res or res.strip() not in ['0', '1']:
            log += f'Try {i}: output is {prediction}, res is {res}, failed to parse.\n'
        else:
            log += 'Succeed'
            return dict(log_score=log, score=int(res) == 1)
    log += 'All 5 retries failed.\n'
    return dict(log_score=log, score=False)


# Human-readable answer-type labels (Chinese / English) used in prompts.
chinese_answer_type_dict = {
    'Numerical': '数值',
    'Expression': '表达式',
    'Equation': '方程',
    'Interval': '区间'
}
english_answer_type_dict = {
    'Numerical': 'a numerical value',
    'Expression': 'an expression',
    'Equation': 'an equation',
    'Interval': 'an interval'
}


def get_single_answer_type_text(answer_type, is_chinese):
    """Map one raw answer-type tag to its Chinese/English display text."""
    if '-' in answer_type:  # No need now
        answer_type = answer_type[:answer_type.find('-')]
    for t in ['Numerical', 'Expression', 'Equation', 'Interval']:
        if t in answer_type:
            if is_chinese:
                return chinese_answer_type_dict[t]
            else:
                return english_answer_type_dict[t]
    # NOTE(review): exit() here terminates the whole process on an unknown
    # answer type — consider whether raising would be safer for callers.
    exit(f'Error parsing answer type {answer_type}!')


def get_answer_type_text(answer_type, is_chinese, multiple_answer):
    """Build the sentence describing the expected answer type(s) for a prompt.

    Returns '' for 'Tuple'/'Need_human_evaluate' types, since 'Tuple' has
    various meanings in different context, such as position or values of a
    series of variable, so it may lead to confusion to directly use 'tuple'
    in the prompt.
    """
    if ('Need_human_evaluate' in answer_type) or ('Tuple' in answer_type):
        full_answer_text = ''
    else:
        if not multiple_answer:
            answer_text = get_single_answer_type_text(answer_type, is_chinese)
            if is_chinese:
                full_answer_text = f',答案类型为{answer_text}'
            else:
                full_answer_text = f"The answer of The problem should be {answer_text}. "
        else:
            if ',' not in answer_type:  # Same answer type for all answers
                answer_text = get_single_answer_type_text(answer_type, is_chinese)
                if is_chinese:
                    full_answer_text = f',题目有多个答案,答案类型均为{answer_text}'
                else:
                    full_answer_text = f'The problem has multiple answers, each of them should be {answer_text}. '
            else:
                answer_types = answer_type.split(',')
                answer_types = [get_single_answer_type_text(t, is_chinese) for t in answer_types]
                if len(set(answer_types)) == 1:
                    answer_text = answer_types[0]
                    if is_chinese:
                        full_answer_text = f',题目有多个答案,答案类型均为{answer_text}'
                    else:
                        full_answer_text = f'The problem has multiple answers, each of them should be {answer_text}. '
                else:
                    if is_chinese:
                        answer_text = '、'.join(answer_types)
                        full_answer_text = f',题目有多个答案,答案类型分别为{answer_text}'
                    else:
                        answer_text = ', '.join(answer_types)
                        full_answer_text = (
                            f'The problem has multiple answers, with the answers in order being {answer_text}. '
                        )
    return full_answer_text


def make_input(prompt, question_content):
    # diversified based on the vllm, which is not implemented temporarily
    input = prompt + '\n' + question_content
    return input


# Allow very large integer-to-string conversions during judging.
sys.set_int_max_str_digits(1000000)
# Set the working precision of decimal arithmetic.
getcontext().prec = 50


class MathJudger:
    """Checker deciding whether two LaTeX answers are mathematically equal,
    combining string normalization, numeric tolerance and sympy equivalence."""

    def __init__(self):
        # Cosmetic LaTeX tokens that do not change the parsed meaning.
        self.special_signal_map = {
            "\\left": "",
            "\\right": "",
            "∶": ":",
            ",": ",",
            "$": "",
            "\\approx": "=",
            "\\simeq": "=",
            "\\sim": "=",
            "^\\prime": "'",
            "^{\\prime}": "'",
            "^\\circ": "",
            "%": "",
        }
        self.pi = parse_latex("\\pi")
        # Default absolute tolerance; overwritten per answer inside judge().
        self.precision = 1e-8

    def split_by_comma(self, expr: str):
        # Split on top-level commas only; commas inside () or [] are kept.
        in_bracket_num = 0
        splitted_expr = []
        start_idx = 0
        for i, char in enumerate(expr):
            if char == "(" or char == "[":
                in_bracket_num += 1
            elif char == ")" or char == "]":
                in_bracket_num -= 1
            elif char == "," and in_bracket_num == 0:
                splitted_expr.append(expr[start_idx:i].strip())
                start_idx = i + 1

        if start_idx < len(expr):
            splitted_expr.append(expr[start_idx:].strip())

        return splitted_expr

    def trans_plus_minus_sign(self, expr_list: list):
        # Expand each "\pm" expression into its explicit "+" and "-" variants.
        new_expr_list = []
        for expr in expr_list:
            if "\\pm" in expr:
                new_expr_list.append(expr.replace("\\pm", "+"))
                new_expr_list.append(expr.replace("\\pm", "-"))
            else:
                new_expr_list.append(expr)

        return new_expr_list

    def judge(self, expression1, expression2, precision=1e-8):
        # (expression1 is treated as the ground truth by convention)
        precision = precision if isinstance(precision, list) else [precision]

        try:
            expression1, expression2 = self.preprocess(expression1, expression2)
        except:
            return False
        if expression1 == expression2:
            # print("原生相等")  (natively equal)
            return True

        # Strip Chinese characters; Chinese yes/no style answers were already
        # handled before reaching this point.
        expression1 = re.sub(r'[\u4e00-\u9fff]+', '', expression1)
        expression2 = re.sub(r'[\u4e00-\u9fff]+', '', expression2)

        expression1 = self.split_by_comma(expression1)
        expression2 = self.split_by_comma(expression2)

        temp_list1 = self.trans_plus_minus_sign(expression1)
        temp_list2 = self.trans_plus_minus_sign(expression2)

        # Build the per-answer tolerance list.
        if len(precision) <= 1:
            precision = precision * len(temp_list1)

        if len(temp_list1) != len(temp_list2):
            return False

        # Try to pair up the elements of the two lists so every pair is equal;
        # this supports comparing multiple answers in any order.
        idx = -1
        while len(temp_list1) != 0:
            idx = (idx + 1) % len(temp_list1)

            item1 = temp_list1[idx]
            self.precision = precision[idx]

            for item2 in temp_list2:
                try:
                    if self.is_equal(item1, item2):
                        temp_list1.remove(item1)
                        temp_list2.remove(item2)
                        precision.remove(self.precision)
                        break
                except Exception as err:
                    logging.warning(f'{type(err)}: {err}')
                    continue
            else:
                # If we didn't break from the inner loop, it means no match was found
                return False

        # If all elements are matched and removed, the lists can be paired
        return True

    def is_interval(self, epr):
        return epr.startswith(("(", "[")) and epr.endswith((")", "]"))

    # Before numeric evaluation, sympy's pi symbol would be replaced with an
    # approximate numeric value.
    # def sympy_sub_pi(self, expression_sympy):
    #     return expression_sympy.subs(self.pi, math.pi)

    # The first expression is assumed to be the ground truth.
    @timeout_decorator.timeout(30)
    def is_equal(self, expression1, expression2):
        if expression1 == expression2 and expression1 != "" and expression2 != "":
            # print("原生等价")  (natively equivalent)
            return True

        # First: if both look like intervals, compare as intervals; a failed
        # interval comparison is final.
        if self.is_interval(expression1) and self.is_interval(expression2):
            try:
                if self.interval_equal(expression1, expression2):
                    return True
            except:
                return False

        # Next: numeric equality within tolerance.
        try:
            if self.numerical_equal(expression1, expression2):
                return True
        except:
            pass

        # Next: symbolic expression equality.
        try:
            if self.expression_equal(expression1, expression2) and not ("=" in expression1 and "=" in expression2):
                return True
        except:
            pass

        # Finally: equation equality.
        try:
            if self.equation_equal(expression1, expression2):
                return True
        except:
            pass

        return False

    # Check whether two numeric values are equal within the allowed error.
    def numerical_equal(self, expression1: str, expression2: str, include_percentage: bool = True):
        """
        (expression1 is assumed to be the ground truth)
        Decide whether two numeric values agree within the tolerance.
        Step 1: also accept the reference scaled by 1/100 or 100 (percentages).
        Step 2: compare with an absolute tolerance.
        """
        reference = float(expression1)
        prediction = float(expression2)

        if include_percentage:
            gt_result = [reference / 100, reference, reference * 100]
        else:
            gt_result = [reference]

        for item in gt_result:
            # if isclose(item, prediction, abs_tol=self.precision, rel_tol=0):
            if abs(item - prediction) <= self.precision * 1.01:
                return True
        return False

    def expression_equal(self, exp1, exp2):
        """
        (expression1 is assumed to be the ground truth)
        Decide whether two expressions are mathematically equivalent.
        Step 1: strip a leading "lhs=" so answers like "x=1" compare as "1".
        Step 2: compare via sympy.
        """

        # Keep only the right-hand side of an equality; the left side is
        # usually the quantity being asked for.
        def extract_expression(expression):
            if "=" in expression:
                expression = expression.split("=")[1]
            return expression.strip()

        exp1 = extract_expression(exp1)
        exp2 = extract_expression(exp2)

        exp_too_long = len(exp1) > 300 or len(exp2) > 300

        # Convert both expressions to a form sympy can process.
        expr1_sym = sympify(parse_latex(exp1))
        expr2_sym = sympify(parse_latex(exp2))

        if expr1_sym == expr2_sym:
            return True
        else:
            # NOTE(review): sympy_sub_pi is commented out above, so these calls
            # raise AttributeError at runtime; callers swallow it via bare
            # except, silently turning this path into "not equal" — confirm
            # whether sympy_sub_pi should be restored.
            expr1_sym = self.sympy_sub_pi(expr1_sym)
            expr2_sym = self.sympy_sub_pi(expr2_sym)
            # If both sides evaluate to concrete numbers, compare numerically.

            if (expr1_sym.has(sp.Symbol) and not expr2_sym.has(sp.Symbol)) or (
                    not expr1_sym.has(sp.Symbol) and expr2_sym.has(sp.Symbol)):
                return False
            elif not expr1_sym.has(sp.Symbol) and not expr2_sym.has(sp.Symbol):
                try:
                    if not (self.can_compute_power(expr1_sym) and self.can_compute_power(expr2_sym)):
                        print(
                            "These two number can not be calculated by current computer for: "
                            f"\"{str(expr1_sym)}\" and \"{str(expr2_sym)}\""
                        )
                        return False
                    if exp_too_long:
                        print(f'Expression {exp1} or {exp2} is too long to compute. ')
                        return False

                    if abs(expr1_sym.evalf() - expr2_sym.evalf()) <= self.precision * 1.01:
                        return True
                    else:
                        return False
                except:
                    return False
            elif exp_too_long:
                print(f'Expression {exp1} or {exp2} is too long to compute. ')
                return False
            else:
                try:
                    simplified_expr = simplify(expr1_sym - expr2_sym)

                    num_value = simplified_expr.evalf()

                    return abs(num_value) < 1e-3
                except:
                    return False

    def equation_equal(self, expression1, expression2):
        """
        (expression1 is assumed to be the ground truth)
        Decide whether two equations are mathematically equivalent.
        Step 1: normalize each equation so its right-hand side is exactly 0;
        then only the left-hand sides need comparing.
        Step 2: divide the two left-hand sides with sympy; if the quotient or
        its reciprocal is a nonzero integer, the equations are equivalent.
        """

        # Move the right-hand side to the left and return a sympy expression.
        def simplify_equation(latex_eq):
            # Split the equation into left and right sides.
            lhs, rhs = latex_eq.split('=')

            # Parse both sides with parse_latex.
            lhs_expr = parse_latex(lhs)
            rhs_expr = parse_latex(rhs)

            # Build the equation object.
            equation = Eq(lhs_expr, rhs_expr)

            # Simplify: subtract the right side from the left.
            simplified_eq = simplify(equation.lhs - equation.rhs)

            return simplified_eq

        expr1_sym = simplify_equation(expression1)
        expr2_sym = simplify_equation(expression2)

        division_result_1 = simplify(expr1_sym / expr2_sym)
        division_result_2 = simplify(expr2_sym / expr1_sym)

        # A nonzero integer quotient (either direction) implies equivalence.
        if (division_result_1.is_Integer and division_result_1 != 0) or (
                division_result_2.is_Integer and division_result_2 != 0):
            return True
        else:
            return False

    def interval_equal(self, expression1, expression2):
        # Decide whether two intervals are mathematically equivalent.
        # Step 1: the expressions reaching here were already stripped of
        # cosmetic tokens like "\left", "\right" and "x \in" by preprocess().
        # Step 2: compare bracket types and each endpoint expression.

        def compare_two_interval(inter1, inter2):

            # The bracket characters on both ends must match first.
            if inter1[0] != inter2[0] or inter1[-1] != inter2[-1]:
                return False

            inter1 = inter1.strip('[]()')
            inter2 = inter2.strip('[]()')

            # Split each interval into its endpoint expressions.
            items_1 = inter1.split(',')
            items_2 = inter2.split(',')

            for item_1, item_2 in zip(items_1, items_2):
                if not self.expression_equal(item_1, item_2):
                    return False
            return True

        interval1 = expression1
        interval2 = expression2

        if interval1 == interval2:
            return True
        else:
            inter_list1 = interval1.split("\\cup")
            inter_list2 = interval2.split("\\cup")

            if len(inter_list1) != len(inter_list2):
                return False
            else:
                for inter1, inter2 in zip(inter_list1, inter_list2):
                    if not compare_two_interval(inter1, inter2):
                        return False
                return True

    def preprocess(self, expression1, expression2):

        # Capture the contents of \boxed{...}; multiple boxes are joined with
        # commas; with no boxes, fall back to $...$ math in the last line.
        def extract_boxed_content(latex_str):
            # Find every \boxed{...} occurrence.
            boxed_matches = re.finditer(r'\\boxed{', latex_str)
            results = ""

            for match in boxed_matches:
                start_index = match.end()
                end_index = start_index
                stack = 1

                # Scan forward from after "\boxed{" to the matching brace.
                while stack > 0 and end_index < len(latex_str):
                    if latex_str[end_index] == '{':
                        stack += 1
                    elif latex_str[end_index] == '}':
                        stack -= 1
                    end_index += 1

                if stack == 0:
                    # Extract the content inside \boxed{}.
                    content = latex_str[start_index:end_index - 1]
                    results += content + ","
                else:
                    # Braces did not close properly: report the error.
                    raise ValueError("Mismatched braces in LaTeX string.")

            # If no '\boxed{}' was matched, default to all $...$ formulas in
            # the last non-empty line of the output.
            if results == "":
                last_line_ans = latex_str.strip().split("\n")[-1]
                dollar_pattern = r"\$(.*?)\$"
                answers = re.findall(dollar_pattern, last_line_ans)

                if answers:
                    for ans in answers:
                        results += ans + ","
                else:
                    results = latex_str

            return results

        def sepcial_symbol_replace(expression):
            if "\\in " in expression:
                expression = expression.split("\\in ")[1]

            # Replace cosmetic characters that do not affect LaTeX parsing.
            for signal in self.special_signal_map:
                expression = expression.replace(signal, self.special_signal_map[signal])

            expression = expression.strip("\n$,.:;^_=+`!@#$%^&*~,。")

            pattern = r'\\(?:mathrm|mathbf)\{~?([^}]*)\}'
            expression = re.sub(pattern, r'\1', expression)

            return expression

        exp1, exp2 = extract_boxed_content(expression1), extract_boxed_content(expression2)
        exp1, exp2 = sepcial_symbol_replace(exp1), sepcial_symbol_replace(exp2)

        return exp1, exp2

    def can_compute_power(self, expr):
        """
        Check if the power expression can be computed.

        Parameters:
        expr (sympy expression): The expression to check.

        Returns:
        bool: True if the expression can be computed, False otherwise.
        """
        # Check if the expression is a power expression
        if isinstance(expr, Pow):
            # Extract the base and the exponent
            base, exp = expr.as_base_exp()

            # Check if the base and the exponent are numbers
            if base.is_number and exp.is_number:
                # Set a threshold for the maximum size of the exponent
                MAX_EXP = 1000  # This threshold can be adjusted based on the computing environment

                # Check if the exponent is greater than the threshold
                if abs(exp.evalf()) > MAX_EXP:
                    return False
                else:
                    return True
            else:
                # If the base or the exponent is not a number, we cannot compute the power
                return False
        else:
            # If the expression is not a power expression, return True as it is not the case we are checking for
            return True


def extract_answer(is_chinese, model_output, is_deepseek=False):
    """Pull the final-answer text out of a model's raw output, honoring the
    language-specific and DeepSeek-specific answer markers."""
    # deepseekmath has special answering format
    if str(model_output) == 'nan':
        model_output = 'nan'

    if is_deepseek:
        if is_chinese:
            matches = re.findall('## 解题答案(.*)', model_output)
        else:
            matches = re.findall('The answer is: (.*)', model_output)

        # If no marker matched, return the whole output so the downstream
        # judge can look for \boxed{} itself.
        if matches:
            # If several matches were found, take the last one.
            model_answer = matches[-1].strip()
            return model_answer
        else:
            return model_output

    if is_chinese:
        matches = re.findall('所以最终答案是(.*)', model_output)
    else:
        matches = re.findall('So the final answer is (.*)', model_output)

    # If no marker matched, return the whole output so the downstream judge
    # can look for \boxed{} itself.
    if matches:
        # If several matches were found, take the last one.
        model_answer = matches[-1].strip()
        return model_answer
    else:
        return model_output


def calculate_merged_accuracy(reference_dir, text_only):
    # Placeholder: merged-accuracy aggregation is not implemented yet.
    pass
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/omni_verifier.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/omni_verifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6c2ecd6dea299046760c9357decae560cc60383
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/omni_verifier.py
@@ -0,0 +1,220 @@
import re
import numpy as np
from .judge_util import build_judge
from .yorn import YOrN_Extraction

EVAL_TMPL = """
You are an AI assistant tasked with evaluating whether a model's response correctly answers
a given visual-language question.
You will be provided with: 1. The model's response; 2. The ground truth answer.
Your task is to determine whether the model's response conveys the same meaning as the ground truth.
The response is considered **correct** if:
- It has the same meaning as the ground truth, even if phrased differently.
- It provides additional relevant details without altering the original meaning.
The response is considered **wrong** if:
- It contradicts the ground-truth
- It misses essential information or include additional incorrect information.
Your evaluation should include a single word (Either `"yes"` (if correct) or `"no"` (if incorrect)).
+ +Now please complete the following task: +[Begin Response]{response}[End Response] +[Begin Ground-Truth]{ground_truth}[End Ground-Truth] +""" + + +def _process_digit_article(inText): + outText = [] + tempText = inText.lower().split() + articles = ['a', 'an', 'the'] + manualMap = { + 'none': '0', 'zero': '0', + 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', + 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', + 'eleven': '11', 'twelve': '12', 'thirteen': '13', 'fourteen': '14', + 'fifteen': '15', 'sixteen': '16', 'seventeen': '17', 'eighteen': '18', + 'nineteen': '19', 'twenty': '20', + } + contractions = { + 'aint': "ain't", + 'arent': "aren't", + 'cant': "can't", + 'couldve': "could've", + 'couldnt': "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + 'didnt': "didn't", + 'doesnt': "doesn't", + 'dont': "don't", + 'hadnt': "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + 'hasnt': "hasn't", + 'havent': "haven't", + 'hed': "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + 'hes': "he's", + 'howd': "how'd", + 'howll': "how'll", + 'hows': "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + 'Im': "I'm", + 'Ive': "I've", + 'isnt': "isn't", + 'itd': "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + 'itll': "it'll", + "let's": "let's", + 'maam': "ma'am", + 'mightnt': "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + 'mightve': "might've", + 'mustnt': "mustn't", + 'mustve': "must've", + 'neednt': "needn't", + 'notve': "not've", + 'oclock': "o'clock", + 'oughtnt': "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + 'shant': "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + 'shouldve': "should've", + 'shouldnt': "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": 'somebodyd', + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + 
'somebodyll': "somebody'll", + 'somebodys': "somebody's", + 'someoned': "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + 'someonell': "someone'll", + 'someones': "someone's", + 'somethingd': "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + 'somethingll': "something'll", + 'thats': "that's", + 'thered': "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + 'therere': "there're", + 'theres': "there's", + 'theyd': "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + 'theyll': "they'll", + 'theyre': "they're", + 'theyve': "they've", + 'twas': "'twas", + 'wasnt': "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + 'weve': "we've", + 'werent': "weren't", + 'whatll': "what'll", + 'whatre': "what're", + 'whats': "what's", + 'whatve': "what've", + 'whens': "when's", + 'whered': "where'd", + 'wheres': "where's", + 'whereve': "where've", + 'whod': "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + 'wholl': "who'll", + 'whos': "who's", + 'whove': "who've", + 'whyll': "why'll", + 'whyre': "why're", + 'whys': "why's", + 'wont': "won't", + 'wouldve': "would've", + 'wouldnt': "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + 'yall': "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + 'youd': "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + 'youll': "you'll", + 'youre': "you're", + 'youve': "you've", + } + for word in tempText: + word = manualMap.setdefault(word, word) + if word not in articles: + outText.append(word) + for wordId, word in enumerate(outText): + if word in contractions: + outText[wordId] = contractions[word] + outText = ' '.join(outText) + return outText + + +class OmniVerifier: + + tmpl_map = { + 'boxed': r'\boxed\{([^}]*)\}', + 'brace': r'\{([^}]*)\}' + } + + def __init__(self, + tmpl=None, + judge='gpt-4o', + 
lower_case=True, + rule_only=False, + retry=3, + timeout=60, + **kwargs): + + self.judge = build_judge(model=judge, retry=retry, timeout=timeout) + self.tmpl = tmpl if tmpl not in self.tmpl_map else self.tmpl_map[tmpl] + self.lower_case = lower_case + self.rule_only = rule_only + if retry >= 3: + self.t_series = list(np.arange(0, 1, 1 / (retry - 1))) + [1.0, ] + else: + self.t_series = [0, 1] if retry == 2 else [0, ] + self.match = -1 + + def verify(self, prediction, gt, **kwargs): + prediction = str(prediction) + gt = str(gt) + if self.tmpl is not None: + matches = re.findall(self.tmpl, prediction) + if len(matches): + if isinstance(self.match, int) and self.match < len(matches): + prediction = matches[self.match] + if self.lower_case: + prediction = prediction.lower() + gt = gt.lower() + prediction = _process_digit_article(prediction) + gt = _process_digit_article(gt) + if gt == prediction: + return True, 'Rule Match' + else: + if self.rule_only: + return False, f'Rule Match: Prediction: {prediction}, GT: {gt}' + judge_prompt = EVAL_TMPL.format(response=prediction, ground_truth=gt) + for t in self.t_series: + res = self.judge.generate(judge_prompt, temperature=t, **kwargs) + answer = YOrN_Extraction(res) + if answer in ['Yes', 'No']: + return answer == 'Yes', 'Judge Match' + return False, f'Judge Failed: Prediction: {prediction}, GT: {gt}' diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/qbench_video.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/qbench_video.py new file mode 100644 index 0000000000000000000000000000000000000000..7ac46945230af493730770dddbb2507488440c6c --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/qbench_video.py @@ -0,0 +1,49 @@ +from ...smp import * +from .multiple_choice import extract_answer_from_item +from PIL import Image, ImageOps +import numpy as np + +FAIL_MSG = 'Failed to obtain answer via API.' + +VQA_JUDGE_SYS_PROMPT = """ +You are a helpful assistant that grades answers related to visual video quality. 
def check_ans_mcq(pred, ans, correct_choice, correct_answer):
    """Rule-based correctness check for a multiple-choice prediction.

    A prediction counts as correct when it equals the choice letter or the
    full answer text, contains the dotted choice (e.g. "A."), or lists the
    bare choice letter on a line of its own.

    Note: `ans` is accepted for signature compatibility but is not consulted.
    """
    if pred == correct_choice or pred == correct_answer:
        return True
    if correct_choice + "." in pred:
        return True
    return correct_choice in pred.split("\n")
...utils import can_infer + + +FAIL_MSG = 'Failed to obtain answer via API.' + + +def get_gpt4_ICE_for_qspatial(): + example_1 = """ +Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit, +e.g., (1, m), (2.2, cm), (3.12, meter), at the end.\n +Model response: **Object Identification** + +* The object in question is a chair. +* The chair is not visible in the image. + +**Conclusion** + +The height of the chair cannot be determined from the provided image.\n +Extracted answer: (0, cm) +""" + + example_2 = """ +Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit, +e.g., (1, inch), (1.2, cm), (3.0, feet), at the end.\n +Model response: **Step 1: Identify the stapler and the recycle bin in the image.** + +The stapler is located on the wooden table, and the recycle bin is located on the floor. + +**Step 2: Determine the distance between the stapler and the recycle bin.** + +The stapler is 0.5 meters from the edge of the table, and the recycle bin is 1.5 meters from the edge of the table. +Therefore, the minimum distance between the stapler and the recycle bin is 1.5 - 0.5 = 1 meter. + +**Answer:** 1 m\n +Extracted answer: (1, m) +""" + example_3 = """ +Hint: Please answer the question requiring in a tuple format. The tuple should contain a numeric value and a unit, +e.g., (1, foot), (2, cm), (4.3, meter), at the end.\n +Model response: The mirror in the image is approximately 5 feet 4 inches tall.\n +Extracted answer: (64, inch) +""" + example_4 = """ +Hint: Please answer the question requiring in a tuple format. 
def post_check(line, prefetch=False):
    """Compare an extracted answer against the ground truth for one sample.

    Args:
        line: Row with 'question_type', 'answer', 'answer_type', and either
            'prediction' (when prefetch) or 'res' (the GPT-extracted answer).
        prefetch: When True, read the raw 'prediction' field and, on a
            match, return the parsed value instead of True.

    Returns:
        The parsed answer (prefetch hit), True (match), or False
        (mismatch or unparseable response).
    """
    res = None
    ans = line['answer']
    response = line['prediction'] if prefetch else line['res']
    try:
        if line['question_type'] == 'multi_choice':
            ans = line['answer_option']
            # NOTE(review): eval() on dataset-provided text -- acceptable
            # for the curated TSV, but ast.literal_eval would be safer if
            # the choices column ever comes from untrusted input.
            choices = list_to_dict(eval(line['choices']))
            res = can_infer(response, choices)
            if prefetch:
                return res
        else:
            if line['answer_type'] == 'integer':
                res = int(response)
                ans = int(line['answer'])
            elif line['answer_type'] == 'float':
                res = float(response)
                ans = float(line['answer'])
            else:
                # BUG FIX: the original did `res = str(res)` while `res`
                # was still None, so every non-numeric answer compared as
                # the literal string "None" and always failed.
                res = str(response)
                ans = str(ans)
    except ValueError:
        # int()/float() failed to parse -- fall through and report mismatch.
        pass

    if res == ans:
        return res if prefetch else True
    else:
        return False
+Then extract the answer from the model response and type it at the end of the prompt.\n +""" + prediction = str(line['prediction']) + prompt = task_description + examples = get_gpt4_ICE_for_qspatial() + for example in examples: + prompt += example + '\n' + prompt += 'Model respone: ' + prediction + prompt += '\nExtracted answer:' + return prompt + + +def QSpatial_auxeval(model, line): + prompt = build_qspatial_gpt4_prompt(line) + + log = '' + retry = 5 + for i in range(retry): + prediction = line['prediction'] + res = model.generate(prompt, temperature=i * 0.5) + + if FAIL_MSG in res: + log += f'Try {i}: output is {prediction}, failed to parse.\n' + else: + log += 'Succeed' + return dict(log=log, res=res) + log += 'All 5 retries failed.\n' + return dict(log=log, res='') diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/seephys.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/seephys.py new file mode 100644 index 0000000000000000000000000000000000000000..975c147e09033069767829c7838484a1c44eec62 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/seephys.py @@ -0,0 +1,312 @@ +from ...smp import * +from collections import OrderedDict, defaultdict +import re +from sympy.parsing.latex import parse_latex +from sympy import latex, Eq, simplify +FAIL_MSG = 'Failed to obtain answer via API.' + +prompt_scoring = r""" +You are a physics professor, please determine if the Standard answer and Model Answer are equivalent. Note that the significant figures in the answer must meet the requirements. Your judgment should be 0 (non-equivalent) or 1 (equivalent). + +[Question]: A force of 20 N acts on an object of mass 5 kg. What is the acceleration of the object? +[Standard Answer]: 4 m/s² +[Model Answer] : 4 +Judgement: 1 + +[Question]: A projectile is launched at an angle $\\theta$ with initial velocity $v_0$. What is its time of flight before returning to the same height, assuming negligible air resistance and gravitational acceleration $g$? 
+[Standard Answer]: $$ t = \\frac{{2 v_0 \\sin(\\theta)}}{{g}} $$ +[Model Answer] : Extracted Answer: $$ t = \\frac{{2 v_0 \\cos(\\frac{\\pi}{2} - \\theta)}}{{g}} $$ +Judgement: 1 + +[Question]: The position of a particle is given by $x(t) = 3t^2 - 2t + 5$ meters. What is its instantaneous velocity at $t=2$ seconds? +[Standard Answer]: 10 m/s +[Model Answer] : Velocity $v(t) = dx/dt = 6t - 2$. At $t=2s$, $v(2) = 6(2) - 2 = 12 - 2 = 10$. So the velocity is 10 m/s. +Judgement: 1 + +[Question]: A car travels North at 20 m/s. It then turns and travels East at 20 m/s. What is the magnitude of its change in velocity? +[Standard Answer]: Approximately 28.3 m/s +[Model Answer] : The change in velocity is 0 m/s because the speed is the same. +Judgement: 0 + +[Question]: An object is thrown horizontally from a height of 20m with an initial speed of 10 m/s. Calculate: (a) the time it takes to hit the ground ($t_g$), and (b) the horizontal distance ($d_x$) it travels before hitting the ground. (Use g = 10 m/s²) +[Standard Answer]: (a) $t_g = 2$ s, (b) $d_x = 20$ m +[Model Answer] : (a) The time to hit the ground $t_g$ is 2 s. (b) The horizontal distance $d_x$ is 10 m. +Judgement: 0 + +[Question]: An engine performs $1.2 \\times 10^5$ J of work in 2 minutes. What is its average power output in watts? +[Standard Answer]: 1 kW +[Model Answer] : Power = Work / Time = $1.2 \\times 10^5$ J / (2 min * 60 s/min) = $1.2 \\times 10^5$ J / 120 s = 1000 W. +Judgement: 1 + +[Question]: A resistor has a voltage of 10V across it and a current of 2A flowing through it. What is its resistance and power dissipation? +[Standard Answer]: Resistance R = 5 Ohms , Power P = 20 Watts. +[Model Answer] : The resistance is $R = V/I = 10V / 2A = 5 \Omega$. The power dissipated is $P = VI = 10V \\times 2A = 20W$. +Judgement: 1 + +[Question]: The displacement of an object in Simple Harmonic Motion (SHM) is given by $x(t) = A \sin(\omega t)$. Determine the equation for its acceleration, $a(t)$. 
+[Standard Answer]: $$ a(t) = -A\omega^2 \sin(\omega t) $$ +[Model Answer] : The acceleration is the second derivative of displacement. $v(t) = A\omega \cos(\omega t)$. $a(t) = A\omega^2 \cos\left(\omega t + \\frac{\pi}{2}\\right)$. +Judgement: 1 + +[Question]: 给出相对论性粒子总能量 $E$ 的速度展开式(到 $v^4/c^4$ 项)。 +[Standard Answer]: $E = mc^2 \left(1 + \frac{v^2}{2c^2} + \frac{3v^4}{8c^4} + \mathcal{O}(v^6/c^6)\right)$ +[Model Answer]: $E = \gamma m c^2 = \frac{mc^2}{\sqrt{1 - v^2/c^2}} \approx mc^2 + \frac{1}{2}mv^2 + \frac{3}{8} \frac{mv^4}{c^2}$ +Judgement: 1 + +[Question]: 计算粒子能量 $E$ 穿过势垒 $V_0$ ($E < V_0$) 的透射系数 $T$。 +[Standard Answer]: $\ln T \approx \ln 16 + \ln\left(\frac{E}{V_0}\right) + \ln\left(1 - \frac{E}{V_0}\right) - \frac{2d}{\hbar} \sqrt{2m(V_0 - E)}$ +[Model Answer]: $T \approx 16 \frac{E}{V_0} \left(1 - \frac{E}{V_0}\right) e^{-2d\sqrt{2m(V_0 - E)}/\hbar}$ +Judgement: 1 + +[Question]: The position of a particle is given by $x(t) = (2t^3 - 3t)$ meters. What is its acceleration at $t=1$ second? The final answer should retain 3 significant figures. +[Standard Answer]: 12.0 m/s² +[Model Answer] : $v(t) = 6t^2 - 3$. $a(t) = 12.1t$. At $t=1s$, $a(1) = 12.1 \\text{ m/s}^2$. +Judgement: 0 +--- +Now please provide your judgement (0 or 1), DONNOT output explanation: +""" # noqa + + +def get_example(): + example_1 = """ +Question: What is the net force acting on a 5 kg object accelerating at 3 m/s² to the right?\n +Model response: Using F = ma, the net force is 15 N to the right.\n +Extracted answer: the net force is 15 N to the right. +""" # noqa + + example_2 = """ +Question: Calculate the charge of an electron. (Unit: C)\n +Model response: The elementary charge of an electron is approximately -1.602 × 10⁻¹⁹ coulombs.\n +Extracted answer: -1.602 × 10⁻¹⁹ C +""" # noqa + + example_3 = """ +Question: How much money does Luca need to buy a sour apple candy and a butter-scotch candy? 
(Unit: $)\n +Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.\n +Extracted answer: $1.45 +""" # noqa + + example_4 = """ +Question: Between which frequencies does human hearing typically range? \n +Model response: Human hearing ranges between 20 Hz and 20,000 Hz.\n +Extracted answer: [20 Hz, 20000 Hz] +""" # noqa + + example_5 = """ +Question: List the wavelengths of visible light colors.\n +Model response: Visible light ranges from:\n +- Red: ~700 nm\n +- Green: ~550 nm\n +- Blue: ~450 nanometre\n +Extracted answer: Red: 700 nm; Green: 550 nm; Blue: 450 nanometre. +""" # noqa + return [example_1, example_2, example_3, example_4, example_5] + + +def build_extract_prompt(line): + task_description = """ +Please read the following example. +Then extract the answer from the model response and type it at the end of the prompt.\n # noqa +""" + question = "Please answer this question in the image." if str(line['question']) == 'nan' else line['question'] # noqa + + prediction = extract_by_rule(line) + prompt = task_description + examples = get_example() + for example in examples: + prompt += example + '\n' + prompt += question + '\n' + prompt += 'Model response: ' + prediction + prompt += 'Extracted answer:' + return prompt + + +def list_to_dict(lst): + return {chr(65 + i): val for i, val in enumerate(lst)} + + +def extract_by_rule(line): + response = line['prediction'] + try: + pattern = r'\s*(\d+)\s*' + match = re.search(pattern, response) + if match: + response = match.group(1) + return response + + except Exception: + pass + try: + pattern = r"the final answer is: (.+?)\." + match = re.search(pattern, response) + if match: + response = match.group(1) + return response + except Exception: + pass + try: + pattern = r"The answer is: (.+?)\." 
def quick_compare(response, answer, tol=1e-6):
    """Cheap SymPy-level equivalence test between two parsed LaTeX objects.

    Args:
        response: SymPy object parsed from the model's extracted answer.
        answer: SymPy object parsed from the ground-truth answer.
        tol: Absolute tolerance used when both sides are plain numbers.

    Returns:
        True when the two objects are judged equivalent, else a falsy value.
    """
    # Either side failing to parse means no comparison is possible.
    if response is None or answer is None:
        return False

    # Case 1: both sides are plain numbers (e.g. "3.1415" vs "\pi") --
    # compare numerically within `tol`.
    if response.is_Number and answer.is_Number:
        return abs(float(response - answer)) < tol
    # Case 2: both sides are equations -- each is reduced to lhs - rhs and
    # the residuals are compared.
    # NOTE(review): the residuals are compared directly, so a flipped
    # equation ("x = y" vs "y = x") yields x - y vs y - x and is judged
    # NON-equivalent, despite the apparent intent -- confirm before
    # relying on this branch.
    if isinstance(response, Eq) and isinstance(answer, Eq):
        return simplify(response.lhs - response.rhs) == simplify(answer.lhs - answer.rhs)

    # Case 3: generic expressions (e.g. "\frac{x}{y}" vs "x/y") --
    # equivalent iff their difference simplifies to zero.
    return simplify(response - answer) == 0
def score_func(model, response, query, gt):
    """Ask the judge model whether `response` matches ground truth `gt`.

    Builds the few-shot judging prompt (module-level ``prompt_scoring``)
    plus the current question/answer pair, then queries `model` up to 5
    times with increasing temperature until it replies with a parseable
    0/1 judgement.

    Args:
        model: Judge with a ``generate(prompt, temperature=...)`` method.
        response: Extracted model answer to be judged.
        query: The original question text.
        gt: The ground-truth answer.

    Returns:
        int: 1 (equivalent) or 0 (not equivalent), or
        None: when all retries fail to produce a 0/1 judgement.
    """
    # An empty extraction can never match the ground truth.
    if not response:
        return 0
    try:
        # BUG FIX: the original f-string used "\{query}" and "\\n", so the
        # judge prompt contained a stray backslash and literal "\n" text
        # instead of real newlines.
        full_prompt = prompt_scoring.strip() + (
            f"\n[Question]: {query}"
            f"\n[Standard Answer]: {gt}"
            f"\n[Model Answer]: {response}"
            f"\nJudgement: "
        )
        # BUG FIX: the original `while try_n < 5` never incremented try_n,
        # looping forever whenever the judge kept answering unparseably.
        for try_n in range(5):
            score = model.generate(full_prompt, temperature=try_n * 0.3)
            # Keep only the text after the last "Judgement:" marker,
            # tolerating casing/spacing variants (first variant that hits).
            for marker in ('Judgement: ', 'Judgement:', 'judgement: ', 'judgement:'):
                if marker in score:
                    score = score.split(marker)[-1]
                    break
            try:
                if int(score) in (0, 1):
                    return int(score)
            except Exception:
                continue
    except Exception as e:
        print("score_func Error!")
        print(e)
    # All retries exhausted without a usable 0/1 judgement.
    return None
tot['Overall'] * 100 if tot['Overall'] > 0 else 0 + + def calculate_accuracy(hit_dict, tot_dict, res_dict, category): + for key in tot_dict[category]: + total = tot_dict[category][key] + hits = hit_dict[category][key] + res_dict[category][key] = hits / total * 100 if total > 0 else 0 + + for category in keys: + calculate_accuracy(hit, tot, res, category) + res_dict = { + 'Overall': { + 'Accuracy (%)': res['acc'], 'PrefetchRate (%)': res['prefetch_rate'] + }, **{cat: dict(res[cat]) for cat in keys} + } + return res_dict diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/__init__.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/aquarium/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/aquarium/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..6c10fe37bce681503cf13e8e38467f862ccc9eb6 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/aquarium/filter_prompt.json @@ -0,0 +1,118 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only 0, e, or s; *, or 0, or _, or -, or e might indicates empty that you should convert to 0. If the perception / answer is not a valid solution or not existe, please put a Wrong there. Sometimes perception is not given but answer is there, you should put a Wrong for perception and process the answer. 
Your goal is only to format the user's input, not to check if it is correct or not."}, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"Based on the numbers given on the sides, each row and column must have 2 filled cells. To ensure contiguity at the bottom, the filled cells should be placed in the bottom two rows. The first and third columns have a constraint of 4 filled cells, so those columns must have the bottom two cells filled. The second and fourth columns can have any two cells filled in the bottom two rows.\",\n\"answer\": [\n [\"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"e\", \"s\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,0,0,0],[0,0,0,0],[s,s,s,s],[s,0,s,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"The puzzle is a 4x5 grid. Each column and row has a number indicating how many cells should be filled in that row/column. The filled cells must be contiguous. \\nLet's start with the first column which has 4 filled cells. Since the filled cells must be contiguous, we can fill the bottom four cells of the first column. Similarly, the last column also has 4 filled cells, so we fill the bottom four cells. The second and third columns have 2 filled cells each. We can fill the bottom two cells in each of these columns. This satisfies the column constraints. Now let's check the row constraints. 
The bottom two rows have 4 filled cells each, which is satisfied by our current solution. The top two rows have 2 filled cells each, which is also satisfied. Therefore, this is the solution.\",\n \"answer\": [\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,0,0,0],[0,0,0,0,0],[s,s,s,s,s],[s,s,s,s,s],[s,s,s,s,s]]" + + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content":"```json\n{\n\"perception\": [[\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n\"answer\": [[\"s\", \"s\", \"e\", \"e\"], [\"s\", \"s\", \"e\", \"e\"], [\"e\", \"e\", \"s\", \"s\"], [\"e\", \"e\", \"s\", \"s\"]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[s,s,0,0],[s,s,0,0],[0,0,s,s],[0,0,s,s]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "**Current Game State:**\n```json\n{\n\"perception\": [\n[\"s\", \"e\", \"e\", \"e\"],\n[\"e\", \"e\", \"e\", \"s\"],\n[\"e\", \"s\", \"e\", \"s\"],\n[\"e\", \"e\", \"e\", \"s\"],\n[\"s\", \"e\", \"s\", \"e\"],\n[\"e\", \"s\", \"e\", \"e\"],\n[\"e\", \"e\", \"e\", \"s\"]\n]\n}\n```\n\n**Solution:**\n```json\n{\n\"answer\": [\n[\"s\", \"s\", \"e\", \"s\"],\n[\"e\", \"s\", \"s\", \"s\"],\n[\"e\", \"s\", \"e\", \"s\"],\n[\"e\", \"e\", \"s\", \"s\"],\n[\"s\", \"s\", \"e\", \"e\"],\n[\"e\", \"s\", \"e\", \"s\"],\n[\"s\", \"e\", \"e\", \"s\"]\n]\n}\n```\n\n**Final Answer:**\nThe solution is represented by the \"answer\" array, where \"s\" denotes filled cells and \"e\" denotes empty cells." 
+ }, + { + "role": "assistant", + "content": "Initial State\n[[s,0,0,0],[0,0,0,s],[0,s,0,s],[0,0,0,s],[s,0,s,0],[0,s,0,0],[0,0,0,s]]\nSolution\n[[s,s,0,s],[0,s,s,s],[0,s,0,s],[0,0,s,s],[s,s,0,0],[0,s,0,s],[s,0,0,s]]" + + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0],\n[0, 0, 0, 0, 0]\n],\n\"think\": \"The grid is initially empty. We need to fill the aquariums to satisfy the given row and column constraints. The numbers on the sides indicate the total number of filled cells in each row and column. We can start by placing the first filled cell in each row and column as shown in the solution. Then, we can fill the remaining cells while ensuring that each aquarium's water cells are contiguous at the bottom of that region.\",\n\"answer\": [\n[0, 0, 0, 0, 0],\n[0, 1, 1, 0, 0],\n[0, 1, 1, 0, 0],\n[0, 1, 1, 0, 0],\n[0, 1, 1, 0, 0]\n]\n}" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\nWrong" + + }, + { + "role": "user", + "content": "Yes. Because 1 is undefined, so put a Wrong there" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[0, 0, 0, 0],\n[0, 0, 0, 0],\n[0, 2, 2, 0],\n[0, 2, 2, 0]\n],\n\"answer\": [\n[0, 0, s, s],\n[0, 0, s, s],\n[0, s, s, 0],\n[0, s, s, 0]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[0,0,s,s],[0,0,s,s],[0,s,s,0],[0,s,s,0]]" + + }, + { + "role": "user", + "content": "Yes. As 2 is not defined" + }, + { + "role": "user", + "content": "```json\n{\n\"answer\": [\n [\"e\", \"e\", \"s\", \"s\"],\n [\"e\", \"s\", \"s\", \"e\"],\n [\"s\", \"s\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\"]\n]\n}\n```\n\nThis solution satisfies the given constraints:\n\n1. Each aquarium's water cells are contiguous at the bottom of that region.\n2. 
The row counts match the number of filled cells in each row: [0, 2, 2, 2].\n3. The column counts match the number of filled cells in each column: [2, 2, 2, 2].\n\nThe aquariums are filled as follows:\n- Aquarium 1: [[2, 0], [1, 0], [0, 0]]\n- Aquarium 2: [[2, 1], [2, 2]]\n- Aquarium 3: [[1, 3], [0, 3], [0, 2], [1, 2], [1, 1], [0, 1]]\n- Aquarium 4: [[2, 3], [3, 3], [3, 2]]" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[0,0,s,s],[0,s,s,0],[s,s,0,0],[s,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. there is no perception, but the answer is valid, so put it there." + }, + { + "role": "user", + "content": "```json\n{\n \"think\": \"To solve this puzzle, I will first identify the filled cells and their positions in the given board state. Then, I will use these positions to fill the aquariums with water according to the given row and column counts. Finally, I will check if the solution is correct by ensuring that the water cells are contiguous at the bottom of each region.\",\n \"answer\": [\n [\"s\", \"s\", \"e\", \"e\"],\n [\"s\", \"s\", \"e\", \"e\"],\n [\"s\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"e\", \"s\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[s,s,0,0],[s,s,0,0],[s,s,0,0],[0,s,0,s]]" + + }, + { + "role": "user", + "content": "Yes. there is no perception, but the answer is valid, so put it there." + }, + { + "role": "user", + "content": "```json\n{\n\"answer\": [\n[1, 1, 1, 1],\n[1, 0, 0, 1],\n[1, 0, 0, 1],\n[1, 0, 0, 1]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\nWrong" + }, + { + "role": "user", + "content": "Yes. there is no perception, so Wrong; for answer, 1 is not defined." 
+ } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/battleships/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/battleships/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..7839a70baaf833d4589b170dfe72f69647c169da --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/battleships/filter_prompt.json @@ -0,0 +1,91 @@ +[ + { + "role": "system", + "content": "You are a text formatter, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please format according to the conversation history with the user feedback. The desired format is a 2d array, where there are only two types of values, 0 or s; if you see * or _ or - or e or #, you should convert them into 0. But if there is any number in the grid, it is Wrong. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"s\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"Based on the given information, there is a ship of size 2 and two ships of size 1. The row hints indicate that the ship of size 2 must be placed in the second row, and the column hints suggest that one of the size 1 ships must be placed in the second column. 
The other size 1 ship can be placed in either the first, third, or fourth column without violating the rules.\",\n\"answer\": [\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"s\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,s,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,s,0,0],[0,s,0,0],[0,0,s,0],[0,0,0,s]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "model_output\": \"```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"s\", \"s\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\", \"e\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,s,s,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,s,s,0],[0,0,0,0,s],[0,s,0,0,0]]" + + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "**Step 1: Analyze the given information**\n\n* The grid is 6x6.\n* There are three ships of sizes 2, 1, and 1.\n* The hints indicate that there are two cells in the first row and one cell in the second row.\n* There is one cell in the first column and one cell in the second column.\n\n**Step 2: Place the ships**\n\n* We can start by placing the two-cell ship in the first row. We can place it in the first two columns or the last two columns.\n* Let's place it in the first two columns. 
This leaves us with a 4x4 grid to place the remaining ships.\n* We can place the one-cell ship in the second row and the first column.\n* This leaves us with a 4x4 grid to place the remaining one-cell ship.\n* We can place it in the second row and the second column.\n\n**Step 3: Check for any constraints**\n\n* There are no constraints mentioned in the problem, so we can proceed with our current placement.\n\n**Step 4: Write the solution**\n\n```json\n{\"perception\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], \"think\": \"Place the two-cell ship in the first two columns, then place the one-cell ship in the second row and the first column, and finally place the remaining one-cell ship in the second row and the second column.\", \"answer\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]}\n```\n\n**Answer:** {\"perception\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], \"think\": \"Place the two-cell ship in the first two columns, then place the one-cell ship in the second row and the first column, and finally place the remaining one-cell ship in the second row and the second column.\", \"answer\": [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]}" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]\nSolution\n[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"]\n],\n\"think\": \"The grid is 4x4, and there are two ships: one of size 1 and one of size 2. The size-1 ship is already placed in the bottom-left corner (x1=0, y1=3). The size-2 ship must be placed horizontally or vertically, not overlapping with the size-1 ship.\",\n\"answer\": [\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"s\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"],\n[\"#\", \"#\", \"#\", \"#\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,s,0,0],[0,s,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,s,0,0],[0,s,0,0],[0,0,0,0],[0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[\"*\", \"*\", \"*\", \"e\"],\n[\"*\", \"*\", \"s\", \"*\"],\n[\"*\", \"*\", \"s\", \"*\"],\n[\"*\", \"e\", \"*\", \"*\"]\n],\n\"think\": \"Let's analyze this 4x4 grid:\n1. We need to place 1 size-2 ship and 2 size-1 ships\n2. The row hints (0,2,1,1) and column hints (2,0,2,0) tell us how many cells are ships\n3. We can see:\n - A size-2 ship vertically placed in column 2, rows 1-2\n - Two water cells marked with dots\n4. Looking at row 0 with hint 0, all cells must be water\n5. Column 1 has hint 0, so all cells must be water\n6. The remaining two size-1 ships must be placed in valid positions without touching the existing size-2 ship\n7. Given the constraints, one size-1 ship must be in row 3, column 0 and the other in row 3, column 2\",\n\"answer\": [\n[\"e\", \"e\", \"e\", \"e\"],\n[\"e\", \"e\", \"s\", \"e\"],\n[\"e\", \"e\", \"s\", \"e\"],\n[\"s\", \"e\", \"s\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,0,s,0],[0,0,s,0],[0,0,0,0]]\nSolution\n[[0,0,0,0],[0,0,s,0],[0,0,s,0],[s,0,s,0]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\"]\n ],\n \"think\": \"Analyze the given grid. We are given hints indicating how many ship cells are in each row and column. Small black circles represent single-cell ship cells, and two-cell ships must be placed as per the hints. From the row and column numbers, I will determine where the ship segments must go, ensuring no ships touch each other, even diagonally.\",\n \"answer\": [\n [],\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0],[0,0,0],[0,0,0]]\nSolution\nWrong" + }, + { + "role": "user", + "content": "Yes. as there is no answer could be extracted from the model output" + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"s\"]\n ],\n \"answer\": [\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"s\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,s]]\nSolution\n[[0,s,0,0],[0,0,s,0],[0,0,0,0],[s,0,0,s]]" + }, + { + "role": "user", + "content": "Yes. 
" + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/binairo/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/binairo/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..df36714bdd387fb23c4173153b69787fcc324af9 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/binairo/filter_prompt.json @@ -0,0 +1,67 @@ +[ + { + "role": "system", + "content": + "You are a text formatter, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please format according to the conversation history with the user feedback. The desired format is a 2d array, where there are only three types of values: 0 or w or b; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, it is wrong. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[\"*\", \"*\", \"w\", \"w\"],\n[\"b\", \"b\", \"*\", \"w\"],\n[\"w\", \"w\", \"*\", \"b\"],\n[\"w\", \"*\", \"*\", \"*\"]\n],\n\"think\": [\n\"1. Looking at row 1 (0-based), there are two black circles (b,b), so the remaining two must be white to avoid three consecutive blacks\",\n\"2. Looking at column 3 (0-based), there are already three whites (w,w,w), so the last cell must be black\",\n\"3. In row 2, we have (w,w,*,b) - to avoid three whites in a row, the empty cell must be black\",\n\"4. In row 3, we have (w,*,*,*) - given the column constraints and avoiding three consecutive same colors, it must be (w,b,w,b)\",\n\"5. 
This completes the grid while satisfying all constraints\"\n],\n\"answer\": [\n[\"b\", \"b\", \"w\", \"w\"],\n[\"b\", \"b\", \"w\", \"w\"],\n[\"w\", \"w\", \"b\", \"b\"],\n[\"w\", \"b\", \"w\", \"b\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[0,0,w,w],[b,b,0,w],[w,w,0,b],[w,0,0,0]]\nAnswer\n[[b,b,w,w],[b,b,w,w],[w,w,b,b],[w,b,w,b]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"b\", \"b\", \"*\", \"*\"],\n [\"*\", \"b\", \"*\", \"w\"],\n [\"*\", \"*\", \"b\", \"b\"],\n [\"w\", \"*\", \"w\", \"b\"]\n ],\n \"think\": \"To solve this, we apply the Binairo rules. Here\u2019s the step-by-step reasoning: \n1. Row 0: The first two cells are 'b'. According to the rules, no more than two same-colored pieces can be adjacent. Thus, the third cell must be 'w'.\n2. Row 3: The fourth cell is 'b', and the second cell is blank. Following the row's structure and aiming for balanced numbers of 'b' and 'w', the second cell must be 'b.\"\n3. Each horizontal" + }, + { + "role": "assistant", + "content": "Perception\n[[b,b,0,0],[0,b,0,w],[0,0,b,b],[w,0,w,b]]\nAnswer\nWrong" + }, + { + "role": "user", + "content": "Yes. as there is no answer section, so you should put a Wrong there." + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"w\"],\n [\"b\", \"b\", \"*\", \"*\"],\n [\"w\", \"*\", \"w\", \"b\"],\n [\"w\", \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [\"b\", \"w\", \"b\", \"w\"],\n [\"b\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[0,0,0,w],[b,b,0,0],[w,0,w,b],[w,0,0,0]]\nAnswer\n[[b,w,b,w],[b,b,w,b],[w,b,w,b],[w,b,w,b]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"w\"],\n [\"b\", \"b\", \"*\", \"*\"],\n [\"w\", \"*\", \"w\", \"b\"],\n [\"w\", \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [\"b\", \"w\", \"b\", \"w\"],\n [\"b\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"],\n [\"w\", \"b\", \"w\", \"b\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[0,0,0,w],[b,b,0,0],[w,0,w,b],[w,0,0,0]]\nAnswer\n[[b,w,b,w],[b,b,w,b],[w,b,w,b],[w,b,w,b]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "The user wants me to solve a Binairo puzzle.\nI need to identify the black and white circles in the image and represent them as 'b' and 'w' respectively. Empty cells will be represented by '*'.\nThen, I need to apply the Binairo rules to fill the empty cells.\nThe rules are:\n1. No more than two adjacent cells can have the same color.\n2. Each row and column must have an equal number of black and white cells.\n\nLet's represent the initial state of the grid.\nRow 0: *, *, *, b\nRow 1: w, *, *, *\nRow 2: *, b, w, *\nRow 3: b, *, *, w\n\nNow, let's apply the rules to fill the empty cells.\n\n**Step 1:** Analyze row 0. It has one 'b'. It needs one more 'b' and two 'w'.\n**Step 2:** Analyze row 1. It has one 'w'. It needs one more 'w' and two 'b'.\n**Step 3:** Analyze row 2. It has one 'b' and one 'w'. It needs one more 'b' and one more 'w'.\n**Step 4:** Analyze row 3. It has one 'b' and one 'w'. It needs one more 'b' and one more 'w'.\n\n**Step 5:** Analyze column 0. It has one 'w' and one 'b'. It needs one more 'w' and one more 'b'.\n**Step 6:** Analyze column 1. It has one 'b'. It needs one more 'b' and two 'w'.\n**Step 7:** Analyze column 2. It has one 'w'. It needs one more 'w' and two 'b'.\n**Step 8:** Analyze column 3. It has one 'b' and one 'w'. 
It needs one more 'b' and one more 'w'.\n\nLet's start filling based on the no-three-in-a-row rule.\nIn row 0, the last cell is 'b'. So the cell at (0,2) cannot be 'b'.\nIn row 3, the first cell is 'b'. So the cell at (3,1) cannot be 'b'.\n\nConsider row 0: *, *, *, b. If the second to last cell is 'b', we have *, *, b, b. Then the first two must be 'w'. So w, w, b, b. This is valid.\nConsider row 3: b, *, *, w. If the second cell is 'w', we have b, w, *, w. The third cell must be 'b'. So b, w, b, w. This is valid.\n\nLet's update the grid:\nw, w, b, b\nw, *, *, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, *, *, *. It needs one more 'w' and two 'b'.\nConsider row 2: *, b, w, *. It needs one more 'b' and one more 'w'.\n\nConsider column 1: w, *, b, w. The empty cell must be 'b'.\nConsider column 2: b, *, w, b. The empty cell must be 'w'.\n\nLet's update the grid:\nw, w, b, b\nw, b, w, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, b, w, *. The last cell must be 'b'.\nConsider row 2: *, b, w, *. The last cell must be 'b'.\n\nLet's update the grid:\nw, w, b, b\nw, b, w, b\n*, b, w, b\nb, w, b, w\n\nConsider row 2: *, b, w, b. The first cell must be 'w'.\n\nLet's update the grid:\nw, w, b, b\nw, b, w, b\nw, b, w, b\nb, w, b, w\n\nNow let's check the column counts.\nColumn 0: w, w, w, b (3w, 1b) - Incorrect.\nThere must be an error in the previous steps.\n\nLet's go back to the state:\nw, w, b, b\nw, *, *, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, *, *, *. It needs one more 'w' and two 'b'.\nIf the second cell is 'b', we have w, b, *, *.\nIf the third cell is 'b', we have w, b, b, *. The last cell must be 'w'. So w, b, b, w.\nConsider row 2: *, b, w, *. It needs one more 'b' and one more 'w'.\n\nLet's update the grid:\nw, w, b, b\nw, b, b, w\n*, b, w, *\nb, w, b, w\n\nConsider column 0: w, w, *, b. The empty cell must be 'b'.\nConsider column 3: b, w, *, w. 
The empty cell must be 'b'.\n\nLet's update the grid:\nw, w, b, b\nw, b, b, w\nb, b, w, b\nb, w, b, w\n\nConsider row 2: b, b, w, b. This violates the no-three-in-a-row rule.\n\nLet's go back to the state:\nw, w, b, b\nw, *, *, *\n*, b, w, *\nb, w, b, w\n\nConsider row 1: w, *, *, *. It needs one more 'w' and two 'b'.\nIf the second cell is 'w', we have w, w, *, *. The last two must be 'b'. So w, w, b, b. This is the same as row 0, which is not allowed.\n\nLet's try filling based on column constraints.\nColumn 0: w, *, *, b. It needs one 'w' and one 'b'.\nColumn 1: w, b, b, w. This is valid.\nColumn 2: b, w, w, b. This is valid.\nColumn 3: b, *, *, w. It needs one 'w' and one 'b'.\n\nLet's go back to the initial state:\n*, *, *, b\nw, *, *, *\n*, b, w, *\nb, *, *, w\n\nFrom row 0: *, *, *, b. The cells at (0,0) and (0,1) cannot be 'b'.\nFrom row 3: b, *, *, w. The cell at (3,1) cannot be 'b'.\n\nFrom column 1: *, *, b, *. The cells at (0,1) and (3,1) cannot be 'b'.\nFrom column 2: *, *, w, *. The cells at (0,2) and (3,2) cannot be 'w'.\n\nConsider row 0: *, *, *, b. If the first two are 'w', we have w, w, *, b. The third must be 'b'. So w, w, b, b.\nConsider row 3: b, *, *, w. If the middle two are 'w', we have b, w, w, w. This is invalid.\n\nLet's restart the thought process with the initial grid.\n```json\n{\n\"perception\": [\n[\"*\", \"*\", \"*\", \"b\"],\n[\"w\", \"*\", \"*\", \"*\"],\n[\"*\", \"b\", \"w\", \"*\"],\n[\"b\", \"*\", \"*\", \"w\"]\n],\n\"think\": \"The grid is 4x4. Each row and column must have two 'w' and two 'b'.\\n\\n**Step 1:** Analyze row 0: [*, *, *, b]. To avoid three consecutive 'b', the cells at (0,0) and (0,1) cannot both be 'b'.\\n\\n**Step 2:** Analyze row 3: [b, *, *, w]. To avoid three consecutive 'b', the cell at (3,1) cannot be 'b'.\\n\\n**Step 3:** Analyze column 1: [*, *, b, *]. To avoid three consecutive 'b', the cell at (0,1) cannot be 'b'.\\n\\n**Step 4:** Consider row 0: [*, *, *, b]. 
If we place 'w' in (0,0) and (0,1), we get [w, w, *, b]. The remaining cell must be 'b' to have two 'b's in the row: [w, w, b, b]. This is valid.\\n\\n**Step 5:** Consider row 3: [b, *, *, w]. If we place 'w' in (3,1), we get [b, w, *, w]. The remaining cell must be 'b': [b, w, b, w]. This is valid.\\n\\n**Step 6:** Update the grid:\\n[w, w, b, b]\\n[w, *, *, *]\\n[*, b, w, *]\\n[b, w, b, w]\\n\\n**Step 7:** Consider column 1: [w, *, b, w]. The missing cell must be 'b': [w, b, b, w].\\n\\n**Step 8:** Consider column 2: [b, *, w, b]. The missing cell must be 'w': [b, w, w, b].\\n\\n**Step 9:** Update the grid:\\n[w, w, b, b]\\n[w, b, w, *]\\n[*, b, w, b]\\n[b, w, b, w]\\n\\n**Step 10:** Consider row 1: [w, b, w, *]. The missing cell must be 'b': [w, b, w, b].\\n\\n**Step 11:** Consider row 2: [*, b, w, b]. The missing cell must be 'w': [w, b, w, b].\\n\\n**Step 12:** Update the grid:\\n[w, w, b, b]\\n[w, b, w, b]\\n[w, b, w, b]\\n[b, w, b, w]\\n\\n**Step 13:** Check column counts:\\nColumn 0: w, w, w, b (Incorrect)\\n\\nLet's backtrack to Step 6 and try a different approach.\\n\\n**Step 6 (Retry):** Consider row 1: [w, *, *, *]. To have two 'w's, let's place 'w' at (1,2). Then we have [w, *, w, *]. The remaining cells must be 'b': [w, b, w, b].\\n\\n**Step 7:** Consider row 2: [*, b, w, *]. To have two 'b's, let's place 'b' at (2,0). Then we have [b, b, w, *]. 
The remaining cell must be 'w': [b, b, w, w].\\n\\n**Step 8:** Update the grid:\\n[w, w, b, b]\\n[w, b, w, b]\\n[b, b, w, w]\\n[b, w, b, w]\\n\\n**Step 9:** Check column counts:\\nColumn 0: w, w, b, b\\nColumn 1: w, b, b, w\\nColumn 2: b, w, w, b\\nColumn 3: b, b, w, w\\nAll columns have two 'w' and two 'b'.\\n\\n**Step 10:** Check row counts:\\nRow 0: w, w, b, b\\nRow 1: w, b, w, b\\nRow 2: b, b, w, w\\nRow 3: b, w, b, w\\nAll rows have two 'w' and two 'b'.\\n\\n**Step 11:** Check for three consecutive same colors.\\nNo three consecutive same colors in any row or column.\",\n\"answer\": [\n[\"w\", \"w\", \"b\", \"b\"],\n[\"w\", \"b\", \"w\", \"b\"],\n[\"b\", \"b\", \"w\", \"w\"],\n[\"b\", \"w\", \"b\", \"w\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[0,0,0,b],[w,0,0,0],[0,b,w,0],[b,0,0,w]]\nAnswer\n[[w,w,b,b],[w,b,w,b],[b,b,w,w],[b,w,b,w]]" + }, + { + "role": "user", + "content": "Yes. " + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/fieldexplore/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/fieldexplore/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..80a85ca514f066c6dd828c4e8f3a63be72d598a5 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/fieldexplore/filter_prompt.json @@ -0,0 +1,102 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only 0 or s or numbers; *, or 0, or _, or -, or e might indicates empty that you should convert to 0. If the perception / answer is not a valid solution or not existe, please put a Wrong there. 
Your goal is only to format the user's input, not to check if it is correct or not"}, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"1\", \"*\", \"*\", \"2\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"*\", \"s\", \"3\", \"*\"]\n],\n\"think\": \"Based on the given information, we can make the following deductions:\n1. The cell with the value '1' has one mine adjacent to it, which must be the revealed mine cell.\n2. The cell with the value '2' in the second row has two mines adjacent to it. One of them is the revealed mine, so there must be another mine in one of the remaining hidden cells adjacent to it.\n3. The cell with the value '3' has three mines adjacent to it. One of them is the revealed mine, so there must be two more mines in the remaining hidden cells adjacent to it.\n4. The cell with the value '2' in the third row has two mines adjacent to it. One of them is the revealed mine, so there must be another mine in one of the remaining hidden cells adjacent to it.\n5. Based on the constraints of the '2' values, the only possible location for the second mine adjacent to the '2' in the third row is the cell to its left.\n6. With the second mine location determined, the remaining two mines adjacent to the '3' cell must be in the two cells above it.\",\n\"answer\": [\n [\"e\", \"e\", \"s\", \"e\"],\n [\"1\", \"e\", \"e\", \"2\"],\n [\"2\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"3\", \"s\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[1,0,0,2],[2,0,0,0],[0,s,3,0]]\nSolution\n[[0,0,s,0],[1,0,0,2],[2,s,0,0],[0,s,3,s]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [1, 2, \"*\", \"*\", \"*\"],\n [1, \"*\", \"*\", 3, 2],\n [2, 3, \"*\", \"*\", \"*\"],\n [1, \"*\", \"*\", 3, \"*\"]\n ],\n \"think\": \"Let's analyze the grid step by step.\\n\\n1. Look at the '3' at (1, 3). It has at most one hidden mine. 
If there is a mine at (1, 2), then the '3' at (1, 3) will be satisfied. \\n2. Look at the '2' at (0, 1). It has at most one hidden mine. If there is a mine at (1, 1), then the '2' at (0, 1) will be satisfied. \\n3. Look at the '1' at (0, 0). It has at most one hidden mine. If there is a mine at (1, 0), then the '1' at (0, 0) will be satisfied. \\n4. Look at the '2' at (2, 0). It has at most one hidden mine. If there is a mine at (2, 1), then the '2' at (2, 0) will be satisfied. \\n5. Look at the '3' at (3, 3). It has at most one hidden mine. If there is a mine at (3, 2), then the '3' at (3, 3) will be satisfied. \\n6. Look at the '1' at (3, 0). It has at most one hidden mine. If there is a mine at (3, 1), then the '1' at (3, 0) will be satisfied. \\n7. Considering the above analysis, we can place mines at (1, 1), (1, 2), (2, 1), (3, 1), (3, 2).\",\n \"answer\": [\n [1, 2, \"e\", \"e\", \"e\"],\n [1, \"s\", \"s\", 3, 2],\n [2, 3, \"s\", \"e\", \"e\"],\n [1, \"s\", \"s\", 3, \"e\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[1,2,0,0,0],[1,0,0,3,2],[2,3,0,0,0],[1,0,0,3,0]]\nSolution\n[[1,2,0,0,0],[1,s,s,3,2],[2,3,s,0,0],[1,s,s,3,0]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "To solve this problem, we need to analyze the given grid and use the information provided by the numbers to deduce the locations of the mines. \n\nThe grid is a 4x4 grid with some cells already revealed with numbers indicating how many mines are adjacent to them (including diagonals). 
The goal is to identify the locations of the mines based on this information.\n\nLet's start by examining the revealed cells:\n\n* Cell (0,0) has a value of 1, indicating that there is one mine adjacent to it.\n* Cell (0,1) has a value of 2, indicating that there are two mines adjacent to it.\n* Cell (1,0) has a value of 1, indicating that there is one mine adjacent to it.\n* Cell (1,1) has a value of 3, indicating that there are three mines adjacent to it.\n* Cell (2,2) has a value of 2, indicating that there are two mines adjacent to it.\n* Cell (2,3) has a value of 3, indicating that there are three mines adjacent to it.\n* Cell (3,2) has a value of 2, indicating that there are two mines adjacent to it.\n* Cell (3,3) has a value of 3, indicating that there are three mines adjacent to it.\n\nBased on this information, we can start making deductions about the locations of the mines.\n\n* Since cell (0,0) has a value of 1, it means that there is exactly one mine adjacent to it. The only possible location for this mine is cell (0,1), because if the mine were in any other adjacent cell, it would not be adjacent to cell (0,0).\n* Similarly, since cell (0,1) has a value of 2, it means that there are exactly two mines adjacent to it. One of these mines is already accounted for by cell (0,0), so the other mine must be in cell (1,1).\n* Continuing this process, we can deduce the locations of the remaining mines.\n\nHere is the solution:\n\n```json\n{\"perception\": [[1, 2, 's', '*'], [1, 3, 's', 2], [2, 3, 's', 2], ['*', 3, 3, 's']], \"think\": \"Use the given numbers to deduce the locations of the mines.\", \"answer\": [['e', 'e', 's', 'e'], ['e', 'e', 's', 'e'], ['e', 'e', 's', 'e'], ['e', 'e', 'e', 's']]}\n```\n\nIn the solution, 'e' indicates an empty cell (no mine), and 's' indicates a cell with a mine." 
+ + }, + { + "role": "assistant", + "content": "Initial State\n[[1,2,s,0],[1,3,s,2],[2,3,s,2],[0,3,3,s]]\nSolution\n[[0,0,s,0],[0,0,s,0],[0,0,s,0],[0,0,0,s]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ],\n \"think\": [\n \"There are no numbers or mines visible in this grid. Since it's a standard game layout, I'll need to start with general reasoning to identify potential mine locations based on the given number clues.\",\n \"Starting with the top left and moving across rows, the number 1 indicates that there is a single mine in one of the four adjacent cells. Similarly, the number 2 suggests a single mine in two of the adjacent cells. The number 3 indicates a single mine in three of the adjacent cells. However, without more information, we cannot pinpoint the exact locations of the mines.\",\n \"To narrow down the possibilities, I'll need to identify the patterns created by the revealed numbers. For example, if two cells have a number 3, we can infer that the mine must be in one of those cells. Similarly, if two cells have a number 1, we can infer that the mine must be in one of those cells. By analyzing the revealed numbers and their patterns, we can make educated guesses about the mine locations.\"\n ],\n \"answer\": [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nSolution\n[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"1\", \"2\", \"*\", \"1\", \"*\"],\n [\"*\", \"*\", \"3\", \"*\", \"2\"],\n [\"2\", \"3\", \"3\", \"*\", \"*\"],\n [\"*\", \"*\", \"3\", \"*\", \"*\"]\n],\n\"think\": \"We start by analyzing the revealed numbers. 
The grid indicates the number of adjacent mines for the cells. We will deduce where the mines must be based on the numbers given:\\n- The cell (0, 0) is 1 and only touches (0, 1), so (0, 1) has a mine, thus confirming 1 mine at (0, 1).\\n- The cell (0, 3) is also 1, and it can only have a mine in (1, 2) since (0, 2) is adjacent to another number.\\n- For cell (1, 2) which is 3, it should have 3 adjacent mines. Currently (0, 1) is already one of them.\\n- The cell (3, 2) is 3; for it to be true, it requires careful placement in surrounding cells (2, 1), (2, 2), and (2, 3) based on existing clues.\\nBy logically deducing, it turns out that the placement of mines leads to the conclusion below:\\n- Cells (0, 1), (1, 2), (2, 1), (2, 2), (2, 3), and (3, 2) are filled with mines. The remaining cells turn out not to have any mines.\",\n\"answer\": [\n [\"e\", \"s\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\", \"s\"],\n [\"s\", \"s\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"s\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[1,2,0,1,0],[0,0,3,0,2],[2,3,3,0,0],[0,0,3,0,0]]\nSolution\n[[0,s,0,0,0],[0,0,s,0,s],[s,s,s,0,0],[0,0,s,0,0]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[\"*\", \"2\", \"*\", \"*\"],\n[\"*\", \"s\", \"3\", \"*\"],\n[\"*\", \"*\", \"*\", \"*\"],\n[\"1\", \"s\", \"3\", \"*\"]\n],\n\"think\": \"1. The '2' at [0, 1] has one revealed mine adjacent to it at [1, 1]. Thus, there must be one more mine adjacent to it. Since the other adjacent cells are [0, 0], [0, 2], [1, 0], and [1, 2], and [1, 2] is adjacent to '3' at [1, 2] which already has a mine at [1, 1], the mine must be at [0, 0], [0, 2], or [1, 0].\\n2. The '3' at [1, 2] has one revealed mine adjacent to it at [1, 1]. Thus, there must be two more mines adjacent to it. The adjacent cells are [0, 1], [0, 2], [0, 3], [1, 1], [1, 3], [2, 1], [2, 2], [2, 3]. 
Since [0, 1] has a '2' and already has a mine adjacent to it, it cannot be a mine. Thus, the two mines must be among [0, 2], [0, 3], [1, 3], [2, 1], [2, 2], [2, 3].\\n3. The '1' at [3, 0] has one revealed mine adjacent to it at [3, 1]. Thus, all other adjacent cells must be empty. \\n4. The '3' at [3, 2] has one revealed mine adjacent to it at [3, 1]. Thus, there must be two more mines adjacent to it. The adjacent cells are [2, 1], [2, 2], [2, 3], [3, 1], [3, 3]. Since [3, 1] is already a mine, the two mines must be among [2, 1], [2, 2], [2, 3], [3, 3].\",\n\"answer\": [\n[\"s\", \"2\", \"s\", \"e\"],\n[\"e\", \"s\", \"3\", \"s\"],\n[\"e\", \"e\", \"s\", \"e\"],\n[\"1\", \"s\", \"3\", \"s\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,2,0,0],[0,s,3,0],[0,0,0,0],[1,s,3,0]]\nSolution\n[[s,2,s,0],[0,s,3,s],[0,0,s,0],[1,s,3,s]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\"],\n [\"1\", \"*\", \"*\", \"2\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"*\", \"s\", \"3\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"e\", \"s\", \"e\"],\n [\"1\", \"s\", \"s\", \"2\"],\n [\"2\", \"s\", \"s\", \"e\"],\n [\"e\", \"s\", \"3\", \"s\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[1,0,0,2],[2,0,0,0],[0,s,3,0]]\nSolution\n[[0,0,s,0],[1,0,0,2],[2,s,s,0],[0,s,3,s]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"1\", \"*\", \"*\", \"1\"],\n [\"1\", \"s\", \"3\", \"*\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"1\", \"s\", \"3\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"s\", \"s\", \"e\"],\n [\"e\", \"s\", \"s\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"s\", \"s\", \"s\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[1,0,0,1],[1,s,3,0],[2,0,0,0],[1,s,3,0]]\nSolution\n[[0,s,s,0],[0,s,s,s],[0,s,0,0],[0,s,s,s]]" + }, + 
{ + "role": "user", + "content": "Right" + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/jigsawsudoku/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/jigsawsudoku/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/jigsawsudoku/filter_prompt.json @@ -0,0 +1,66 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only two types values 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not" + }, + { + "role": "user", + "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n" + }, + { + "role": "assistant", + "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n" + }, + { + "role": "assistant", + "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. 
For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``" + }, + { + "role": "assistant", + "content": "Perception\nWrong\nAnswer\nWrong" + }, + { + "role": "user", + "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer." 
+ }, + { + "role": "user", + "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities." + }, + { + "role": "assistant", + "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]" + }, + { + "role": "user", + "content": "Yes. ." + }, + { + "role": "user", + "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. 
\\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]" + }, + { + "role": "user", + "content": "Yes. ." + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/kakuro/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/kakuro/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..8b7da246d4d4b1830a76ba54297945affdf85104 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/kakuro/filter_prompt.json @@ -0,0 +1,54 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only two types values 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"4\", \"2\", \"*\", \"2\"],\n [\"*\", \"4\", \"1\", \"4\"],\n [\"3\", \"*\", \"4\", \"*\"],\n [\"4\", \"2\", \"*\", \"*\"]\n],\n\"think\": \"To solve this Kakuro puzzle, I will analyze the given clues and the constraints of the game. First, I'll look at the rows and columns with only one missing value and try to deduce that value based on the given sum and the existing values. 
Then, I'll move on to the more complex rows and columns, using logical deduction and the rule that adjacent numbers cannot be the same. I'll continue this process until I have filled in all the empty cells.\",\n\"answer\": [\n [\"4\", \"2\", \"3\", \"2\"],\n [\"8\", \"4\", \"1\", \"4\"],\n [\"3\", \"6\", \"4\", \"5\"],\n [\"4\", \"2\", \"7\", \"3\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[4,2,0,2],[0,4,1,4],[3,0,4,0],[4,2,0,0]]\nSolution\n[[4,2,3,2],[8,4,1,4],[3,6,4,5],[4,2,7,3]]" + }, + { + "role": "user", + "content": "Yes" + }, + { + "role": "user", + "content": "{\"perception\": [[3, 9, 12, 10], [1, 4, 2, 14], [7, 3, 1, 4], [4, 3, 2, 11]], \n\"think\": \"The problem is a constrained optimization problem. We want to fill in the grid with numbers 1 to 4 such that each row and column adds up to the specified sum. We also want to avoid having adjacent numbers be the same. This is a classic example of a constraint satisfaction problem. Our goal is to find a solution that satisfies all the constraints. We can start by analyzing the given information and looking for any patterns or clues that can help us narrow down the possible solutions. For example, in the first row, the sum is 12 and the middle cell is empty. This means that the sum of the two adjacent cells must be 12. If we try different combinations, we can see that the only possibility is 3 + 9 = 12. Similarly, in the second row, the sum is 14 and the middle cell is empty. This means that the sum of the two adjacent cells must be 14. If we try different combinations, we can see that the only possibility is 4 + 10 = 14. We can continue this process for each row and column to find the possible values for the empty cells. Once we have found the possible values for each cell, we can use constraint propagation to eliminate any impossible values and narrow down the search space. 
Finally, we can use a backtracking search algorithm to find a solution that satisfies all the constraints.\", \n\"answer\": [[3, 9, 12, 10], [1, 4, 2, 14], [7, 3, 1, 4], [4, 3, 2, 11]]}" + }, + { + "role": "assistant", + "content": "Initial State\n[[3,9,12,10],[1,4,2,14],[7,3,1,4],[4,3,2,11]]\nSolution\n[[3,9,12,10],[1,4,2,14],[7,3,1,4],[4,3,2,11]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"4\", \"1\", \"*\"],\n [\"*\", \"3\", \"*\", \"*\"],\n [\"2\", \"*\", \"*\", \"*\"],\n [\"*\", \"2\", \"*\", \"3\"]\n ],\n \"answer\": [\n [\"3\", \"4\", \"1\", \"2\"],\n [\"4\", \"3\", \"2\", \"1\"],\n [\"2\", \"1\", \"4\", \"3\"],\n [\"1\", \"2\", \"3\", \"4\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,4,1,0],[0,3,0,0],[2,0,0,0],[0,2,0,3]]\nSolution\n[[3,4,1,2],[4,3,2,1],[2,1,4,3],[1,2,3,4]]" + }, + { + "role": "user", + "content": "Right." + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [4, \"*\", \"*\", \"*\", \"*\"],\n [2, \"*\", \"*\", \"*\", 3],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [2, 3, \"*\", \"*\", \"*\"]\n ],\n \"answer\": [\n [4, 3, 2, 1, 4],\n [2, 4, 1, 3, 3],\n [1, 2, 4, 3, 2],\n [2, 3, 1, 4, 0]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[4,0,0,0,0],[2,0,0,0,3],[0,0,0,0,0],[2,3,0,0,0]]\nSolution\n[[4,3,2,1,4],[2,4,1,3,3],[1,2,4,3,2],[2,3,1,4,0]]" + }, + { + "role": "user", + "content": "Right." 
+ } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/killersudoku/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/killersudoku/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..fd9a6d57adbee1b886bd5ada473e6adfe08469af --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/killersudoku/filter_prompt.json @@ -0,0 +1,90 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only two types values 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", 2, \"*\", \"*\"],\n [\"*\", \"*\", 5, \"*\"],\n [\"*\", \"*\", 3, \"*\"],\n [5, \"*\", \"*\", 2]\n],\n\"think\": \"To solve this KillerSudoku puzzle, I will first analyze the given numbers and cages to identify any constraints or deductions that can be made. Then, I will use logic and the rules of Sudoku to fill in the remaining empty cells.\n\nThe teal cage in the top left has a target sum of 9, with one cell already containing 2. So the other cell must be 7.\n\nThe yellow cage has a target sum of 4, with one cell containing 1. So the other cell must be 3.\n\nThe purple cage has a target sum of 5, but no numbers are given, so I cannot deduce anything yet.\n\nThe red cage has a target sum of 7, with one cell containing 5. 
So the other cell must be 2.\n\nThe blue cage has a target sum of 4, with one cell containing 4, so the other cell must be filled with the remaining number, which is 1.\n\nThe orange cage has no target sum given, but one cell contains 2. The other cell must be either 1, 3, or 4 based on the remaining numbers.\n\nLooking at the rows, columns, and 2x2 blocks, I can place the remaining numbers while satisfying the Sudoku rules.\",\n\"answer\": [\n [7, 2, 1, 4],\n [3, 4, 5, 1],\n [1, 3, 2, 4],\n [5, 1, 4, 2]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,2,0,0],[0,0,5,0],[0,0,3,0],[5,0,0,2]]\nSolution\n[[7,2,1,4],[3,4,5,1],[1,3,2,4],[5,1,4,2]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [[null, null, null, null], [null, 1, null, null], [null, null, 2, null], [null, 4, null, 3]],\n\"think\": \"Let's start by filling some obvious cells.\\nThe top-left cell must be 2 since it's a single-cell cage with target 7.\\nThe cell at (1, 0) must be 3 since the cage has a target of 4 and the cell at (1, 1) is 1.\\nThe cell at (0, 1) must be 4 since it's a single-cell cage with target 5.\\nThe cell at (0, 3) must be 3 since it's a single-cell cage with target 3.\\nThe cell at (1, 2) must be 5 since the cage has a target of 6 and the cell at (1, 1) is 1.\\nThe cell at (2, 0) must be 1 since it's a single-cell cage with target 3.\\nThe cell at (3, 0) must be 2 since it's a single-cell cage with target 5 and the cell at (3, 1) is 4.\\nThe cell at (2, 3) must be 4 since it's a single-cell cage with target 7 and the cell at (2, 2) is 2.\\nThe cell at (3, 2) must be 1 since it's a single-cell cage with target 5 and the cell at (3, 1) is 4.\\nThe cell at (1, 3) must be 4 since it's a single-cell cage with target 3 and the cell at (0, 3) is 3.\\nThe cell at (2, 1) must be 3 since the cage has a target of 3 and the cell at (2, 0) is 1.\\nThe cell at (0, 2) must be 2 since the cage has a 
target of 3 and the cell at (0, 3) is 3.\",\n\"answer\": [[2, 4, 2, 3], [3, 1, 5, 4], [1, 3, 2, 4], [2, 4, 1, 3]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0],[0,1,0,0],[0,0,2,0],[0,4,0,3]]\nSolution\n[[2,4,2,3],[3,1,5,4],[1,3,2,4],[2,4,1,3]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[3, 7, 5, 4],\n[6, 1, 2, 5],\n[4, 3, 7, 6],\n[8, 9, 10, 11]\n],\n\"answer\": [\n[3, 7, 5, 4],\n[6, 1, 2, 5],\n[4, 3, 7, 6],\n[8, 9, 10, 11]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[3,7,5,4],[6,1,2,5],[4,3,7,6],[8,9,10,11]]\nSolution\n[[3,7,5,4],[6,1,2,5],[4,3,7,6],[8,9,10,11]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content": "**Solution to the Killer Sudoku Puzzle**\n\n**Current Game State:**\n\n```\n[2, 4, 3, 5, \n1, 5, 7, 4, \n3, 5, 2, 1, \n9, 4, 9, 8]\n```\n\n**Thinking Process:**\n\nTo solve this Killer Sudoku puzzle, we need to fill in the missing numbers in the grid while satisfying the constraints of each cage. Since the target sums for the cages are not provided, we will focus on using the given numbers to deduce the correct placement.\n\nFirst, let's analyze the top-left 2x2 block. The sum of the numbers in this block is 2 + 4 + 3 + 5 = 14. We can see that the numbers 2, 4, 3, and 5 are already present, leaving only the number 9 to complete the block.\n\nNext, let's examine the middle-left 2x2 block. The sum of the numbers in this block is 1 + 5 + 7 + 4 = 17. We can see that the numbers 1, 5, 7, and 4 are already present, leaving only the number 9 to complete the block.\n\nNow, let's look at the bottom-left 2x2 block. The sum of the numbers in this block is 3 + 5 + 2 + 1 = 11. We can see that the numbers 3, 5, 2, and 1 are already present, leaving only the number 9 to complete the block.\n\nFinally, let's examine the top-right 2x2 block. 
The sum of the numbers in this block is 9 + 4 + 9 + 8 = 30. We can see that the numbers 9, 4, 9, and 8 are already present, leaving only the number 9 to complete the block.\n\n**Solution:**\n\n```\n[2, 4, 3, 9, \n1, 5, 7, 4, \n3, 5, 2, 1, \n9, 4, 9, 8]\n```\n\nThis solution satisfies all the constraints of the puzzle, including the unique numbers in each row, column, and 2x2 block, as well as the target sums for each cage." + }, + { + "role": "assistant", + "content": "Initial State\n[[2,4,3,5],[1,5,7,4],[3,5,2,1],[9,4,9,8]]\nSolution\n[[2,4,3,9],[1,5,7,4],[3,5,2,1],[9,4,9,8]]" + }, + { + "role": "user", + "content": "Right " + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [[2,1,4,8],[3,2,1,0],[3,2,1,0],[5,4,5,2]],\n\"answer\": [[2,1,4,8],[4,2,1,3],[1,3,2,5],[3,5,2,4]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[2,1,4,8],[3,2,1,0],[3,2,1,0],[5,4,5,2]]\nSolution\n[[2,1,4,8],[4,2,1,3],[1,3,2,5],[3,5,2,4]]" + }, + { + "role": "user", + "content": "Right" + }, + { + "role": "user", + "content":"```json\n{\n\"answer\": [[2, 1, 3, 4],\n [3, 2, 4, 1],\n [4, 3, 1, 2],\n [1, 4, 2, 3]]\n}\n```\n\nThis solution satisfies all the rules:\n1. Each row, column, and 2x2 block contains unique digits from 1 to 4.\n2. 
The sum of numbers in each cage matches the target sum:\n - Cage with sum 9: 2 + 1 + 3 = 6\n - Cage with sum 4: 1 + 3 = 4\n - Cage with sum 5: 4 + 1 = 5\n - Cage with sum 5: 2 + 3 = 5\n - Cage with sum 7: 4 + 1 + 2 = 7\n - Cage with sum 5: 2 + 3 = 5\n - Cage with sum 5: 1 + 4 = 5" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[2,1,3,4],[3,2,4,1],[4,3,1,2],[1,4,2,3]]" + }, + { + "role": "user", + "content": "Right, " + }, + { + "role": "user", + "content":"```json\n{\n\"answer\": [\n[2, 1, 3, 4],\n[9, 3, 6, 3],\n[5, 8, 1, 4],\n[1, 4, 7, 2]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[2,1,3,4],[9,3,6,3],[5,8,1,4],[1,4,7,2]]" + }, + { + "role": "user", + "content": "Right, " + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/lightup/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/lightup/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..d8a6c4dffb249a475f82f3861088f957ef4db2e6 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/lightup/filter_prompt.json @@ -0,0 +1,92 @@ +[ + { + "role": "system", + "content": + "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only w or 0 or s; *, or 0, or _, or -, or e might indicates empty that you should convert to 0. If you see numbers other than 2, you should convert them to w. If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not." 
+ }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"s\", \"w\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"w\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n],\n\"answer\": [\n [\"s\", \"w\", \"e\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"w\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[s,w,0,0],[0,0,0,0],[0,0,w,0],[0,0,0,0]]\nSolution\n[[s,w,0,s],[0,s,0,0],[0,0,w,0],[0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "The current state of the grid in the screenshot is:\n\n```\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"2\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n],\n\"answer\": [\n [\"s\", \"s\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"s\"],\n [\"e\", \"e\", \"e\", \"s\", \"e\"]\n]\n}\n```\n\nThis solution satisfies all the rules of the game. The numbered wall has exactly two bulbs adjacent to it, and all empty cells are illuminated. The bulbs do not illuminate each other." + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,w,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[s,s,0,0,0],[s,0,0,0,0],[0,0,0,0,0],[0,0,0,0,s],[0,0,0,s,0]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [[\"s\", \"2\", \"*\", \"*\"], [\"w\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"w\", \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n\"answer\": [[\"s\", \"e\", \"s\", \"e\"], [\"e\", \"e\", \"e\", \"s\"], [\"s\", \"e\", \"e\", \"e\"], [\"e\", \"s\", \"e\", \"e\"]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[s,w,0,0],[w,0,0,0],[0,0,w,0],[0,0,0,0]]\nSolution\n[[s,0,s,0],[0,0,0,s],[s,0,0,0],[0,s,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"s\", \"*\", \"w\", \"*\"],\n [\"w\", \"2\", \"*\", \"*\"],\n [\"*\", \"*\", \"w\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"1. Start by placing bulbs around the '2' wall. Positions (0,1) and (1,2) work because they don't conflict with any other constraints and fulfill the wall's requirement.\\n2. Check other empty cells to ensure they are illuminated without overlapping bulbs.\\n3. Place bulbs in (2,0) and (3,2) to cover remaining empty areas left by previous placements, ensuring no bulbs are facing directly toward each other.\",\n \"answer\": [\n [\"s\", \"s\", \"w\", \"s\"],\n [\"w\", \"2\", \"s\", \"e\"],\n [\"s\", \"e\", \"w\", \"s\"],\n [\"e\", \"e\", \"s\", \"e\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[s,0,w,0],[w,w,0,0],[0,0,w,0],[0,0,0,0]]\nSolution\n[[s,s,w,s],[w,w,s,0],[s,0,w,s],[0,0,s,0]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "The current state of the grid as a 2D array is:\n```json\n[\n [\".\", \"2\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"],\n [\"s\", \".\", \"s\"]\n]\n```\nThe perception is that all cells are empty except for the two cells marked with \"2\", which must have exactly that number of bulbs adjacent to them. The numbered walls must have exactly that many bulbs adjacent to them, and bulbs cannot illuminate each other.\n\nThe think is that the only possible solution is to place a bulb in the cell adjacent to the \"2\" on the top left corner, and then place bulbs in the cells adjacent to the \"2\" on the top right corner. This will illuminate all empty cells without violating any of the rules.\n\nThe answer is:\n```json\n[\n [\"s\", \"2\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\"]\n]\n```\nThis solution satisfies all the rules and illuminates all empty cells." + }, + { + "role": "assistant", + "content": "Initial State\n[[0,w,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s],[s,0,s]]\nSolution\n[[s,w,s],[s,s,s],[s,s,s],[s,s,s],[s,s,s],[s,s,s],[s,s,s],[s,s,s],[s,s,s],[s,s,s]]" + + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"w\", \"*\", \"*\"],\n [\"*\", \"2\", \"*\", \"*\"], \n [\"s\", \"*\", \"w\", \"*\"],\n [\"*\", \"s\", \"*\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"w\", \"s\", \"e\"],\n [\"e\", \"2\", \"e\", \"e\"],\n [\"s\", \"e\", \"w\", \"e\"], \n [\"e\", \"s\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,w,0,0],[0,w,0,0],[s,0,w,0],[0,s,0,0]]\nSolution\n[[0,w,s,0],[0,2,0,0],[s,0,w,0],[0,s,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [[\"*\", \"2\", \"*\", \"*\", \"*\"], [\"w\", \"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"w\", \"*\", \"*\"], [\"*\", \"s\", \"*\", \"*\", \"*\"]],\n\"answer\": [[\"e\", \"2\", \"e\", \"s\", \"e\"], [\"w\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"w\", \"e\", \"e\"], [\"e\", \"s\", \"e\", \"e\", \"e\"]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,w,0,0,0],[w,0,0,0,0],[0,0,w,0,0],[0,s,0,0,0]]\nSolution\n[[0,w,0,s,0],[w,0,0,0,0],[0,0,w,0,0],[0,s,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/nonogram/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/nonogram/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..e4f9dcfaf69fbcb9da630be5ce89e1e38a5ce649 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/nonogram/filter_prompt.json @@ -0,0 +1,78 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. 
The desired format is a 2d array, where there is only 0 or s; *, or 0, or _, or -, or e might indicates empty that you should convert to 0. If the perception / answer is not a valid solution or not existe, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not." + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"s\", \"s\"]\n],\n\"think\": \"The image shows a Nonogram puzzle grid with 5 rows and 5 columns. The numbers on the side indicate the lengths of consecutive filled cells in each row and column.\n\nFor the rows:\n- Row 0 has no numbers, so it should be all empty cells.\n- Row 1 also has no numbers, so it should be all empty cells.\n- Row 2 has the number 3, indicating a block of 3 consecutive filled cells.\n- Row 3 has the number 4, indicating a block of 4 consecutive filled cells.\n- Row 4 has the number 5, and the last two cells are already filled, so the first 3 cells should also be filled.\n\nFor the columns:\n- Column 0 has the number 3, indicating a block of 3 consecutive filled cells.\n- Column 1 has the number 3, indicating a block of 3 consecutive filled cells.\n- Column 2 has the number 3, indicating a block of 3 consecutive filled cells.\n- Column 3 has the number 2, indicating a block of 2 consecutive filled cells.\n- Column 4 has the number 1, indicating a single filled cell, which is already given.\n\nBy combining the row and column constraints, we can deduce the solution.\n\",\n\"answer\": [\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\", \"s\", \"e\"],\n [\"s\", \"s\", \"s\", \"s\", \"s\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial 
State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,s,s]]\nSolution\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,s,s],[s,s,s,s,0],[s,s,s,s,s]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"Row 0: The clue is 5, meaning all cells in this row must be filled.\\nColumn 0: The clue is 3. Since row 0 is filled, we can fill the first three cells of column 0.\\nColumn 1: The clue is 3. Since row 0 is filled, we can fill the first three cells of column 1.\\nColumn 2: The clue is 3. Since row 0 is filled, we can fill the first three cells of column 2.\\nRow 1: The clue is 4. Since the first three cells of columns 0, 1, and 2 are filled, the first three cells of row 1 must also be filled. The last cell must be empty because column 4 has a clue of 1 and row 0 is already filled.\\nRow 2: The clue is 3. Since the first three cells of columns 0, 1, and 2 are filled, the first three cells of row 2 must also be filled.\\nColumn 3: The clue is 2. Since rows 0 and 1 are filled at column 3, and row 2 is filled in the first three cells, the two filled cells must be in rows 0 and 1.\\nColumn 4: The clue is 1. Since row 0 is filled, the filled cell must be in row 0.\\nRow 3 and Row 4: The clue is 0, meaning all cells in these rows must be empty.\",\n \"answer\": [\n [\"s\", \"s\", \"s\", \"s\", \"s\"],\n [\"s\", \"s\", \"s\", \"s\", \"e\"],\n [\"s\", \"s\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[s,s,s,s,s],[s,s,s,s,0],[s,s,s,0,0],[0,0,0,0,0],[0,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [[\"s\", \"s\", \"s\", \"e\", \"s\", \"s\", \"e\", \"s\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"s\", \"s\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"s\", \"s\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"]],\n \"think\": \"Based on the clues, I need to fill in the rows and columns accordingly. The numbers on the left represent the blocks of filled cells in that row. For instance, row 0 has '3' meaning three filled cells together, which I placed in the first three cells. Row 1 has no filled cells according to the numbers. For row 2, it has '2', indicating two blocks but only one block (2 cells) is filled. 
I continued this way for each row and then confirmed the columns were also consistent with the clues provided.\",\n \"answer\": [[\"s\", \"s\", \"s\", \"e\", \"s\", \"s\", \"e\", \"s\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"s\", \"s\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"s\", \"s\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\", \"e\"]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[s,s,s,0,s,s,0,s,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,s,s,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,s,s,0,0,0,0],[0,0,0,0,0,0,0,0,0,0]]\nSolution\n[[s,s,s,0,s,s,0,s,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,s,s,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0],[0,0,0,0,s,s,0,0,0,0],[0,0,0,0,0,0,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [[*3*, *3*, *3*, *3*],\n [*3*, 3, *3*, *3*],\n [*3*, *3*, *3*, *3*],\n [*3*, *3*, *3*, *3*]],\n \"think\": \"Let's analyze each row one by one. The first row has a single 3-cell block, which suggests that all the cells in that row must be filled. The second row also has a single 3-cell block, so we can fill in the 3 cells from index 0 to 2. The third row has two 3-cell blocks, indicating that there must be at least one empty cell between them. The fourth row also has two 3-cell blocks, and as before, we can fill in the 3 cells from index 0 to 2. The last row has a single 3-cell block, which means that all the cells in that row must be filled.\",\n \"answer\": [[0, 3, 0, 3],\n [3, 3, 0, 3],\n [0, 3, 0, 3],\n [3, 3, 0, 3]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[3,3,3,3],[3,3,3,3],[3,3,3,3],[3,3,3,3]]\nSolution\n[[0,3,0,3],[3,3,0,3],[0,3,0,3],[3,3,0,3]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n[\"*\", \"*\", \"*\", \"s\", \"s\"],\n[\"*\", \"*\", \"*\", \"s\", \"*\"],\n[\"*\", \"*\", \"*\", \"*\", \"*\"],\n[\"*\", \"*\", \"*\", \"*\", \"*\"],\n[\"*\", \"*\", \"*\", \"*\", \"*\"]\n],\n\"answer\": [\n[\"e\", \"e\", \"s\", \"s\", \"s\"],\n[\"e\", \"e\", \"s\", \"s\", \"s\"],\n[\"e\", \"e\", \"s\", \"s\", \"s\"],\n[\"e\", \"e\", \"e\", \"e\", \"e\"],\n[\"e\", \"e\", \"e\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,s,s],[0,0,0,s,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,s,s,s],[0,0,s,s,s],[0,0,s,s,s],[0,0,0,0,0],[0,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/renzoku/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/renzoku/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/renzoku/filter_prompt.json @@ -0,0 +1,66 @@ +[ + { + "role": "system", + "content": "You are a text formater, your goal is to extract two parts, perception and answer from the answer and put them into a good format. Please according to the conversation history with the user feedback. The desired format is a 2d array, where there is only two types values 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there.If the perception / answer is not a valid solution or not existe, please put a Wrong there. 
Your goal is only to format the user's input, not to check if it is correct or not" + }, + { + "role": "user", + "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n" + }, + { + "role": "assistant", + "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n" + }, + { + "role": "assistant", + "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. 
The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. 
This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``" + }, + { + "role": "assistant", + "content": "Perception\nWrong\nAnswer\nWrong" + }, + { + "role": "user", + "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer." + }, + { + "role": "user", + "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities." 
+ }, + { + "role": "assistant", + "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]" + }, + { + "role": "user", + "content": "Yes. ." + }, + { + "role": "user", + "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. \\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]" + }, + { + "role": "user", + "content": "Yes. ." 
+ } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/starbattle/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/starbattle/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..deca0f5c63613f9be51db2bc34e483fecdff6968 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/starbattle/filter_prompt.json @@ -0,0 +1,49 @@ +[ + { + "role": "system", + "content": "You are a text formatter, your goal is to extract two parts, perception and answer, from the response and put them into a good format. Please format according to the conversation history and the user feedback. The desired format is a 2d array, where there is only 0 or s; *, or 0, or _, or -, or e might indicate empty, which you should convert to 0. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not."}, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"s\", \"s\", \"s\"],\n [\"*\", \"*\", \"s\", \"s\", \"s\"],\n [\"*\", \"*\", \"s\", \"s\", \"s\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"s\", \"*\", \"*\", \"*\", \"*\"]\n],\n\"think\": \"Based on the current state of the grid, there is already a star placed in the bottom-left corner cell. To solve this puzzle, I need to place the remaining stars in such a way that no two stars are touching each other, not even diagonally, and each row, column, and region contains exactly one star. 
I'll analyze the grid row by row and column by column to identify the available cells where I can place the remaining stars.\",\n\"answer\": [\n [\"e\", \"e\", \"s\", \"s\", \"s\"],\n [\"e\", \"e\", \"s\", \"s\", \"s\"], \n [\"e\", \"e\", \"s\", \"s\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,s,s,s],[0,0,s,s,s],[0,0,s,s,s],[0,0,0,0,0],[0,0,0,0,0],[s,0,0,0,0]]\nSolution\n[[0,0,s,s,s],[0,0,s,s,s],[0,0,s,s,s],[0,s,0,0,0],[s,0,0,0,0],[s,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": [\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"s\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"]\n ],\n \"think\": \"The given grid is a 5x5 Star Battle puzzle. The goal is to place stars such that each row, column, and 2x2 region contains exactly one star, and no two stars are adjacent (including diagonally). I'll start by filling in the obvious spots, considering the constraints. The existing star in the bottom-left corner restricts placement in its vicinity. I'll proceed systematically, row by row, and column by column, checking for conflicts.\",\n \"answer\": [\n [\"e\", \"e\", \"s\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"s\"],\n [\"e\", \"s\", \"e\", \"e\", \"e\"],\n [\"s\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"e\", \"s\", \"e\"]\n ]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[s,0,0,0,0],[0,0,0,0,0]]\nSolution\n[[0,0,s,0,0],[0,0,0,0,s],[0,s,0,0,0],[s,0,0,0,0],[0,0,0,s,0]]" + }, + { + "role": "user", + "content": "Yes." + }, + { + "role": "user", + "content": "I'm sorry, I can't visualize the image directly. 
Could you please describe the grid or textually represent it so I can help you with the Star Battle solution?" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\nWrong" + }, + { + "role": "user", + "content": "**Current Game State**\n\nThe game board is a 4x5 grid with some cells containing stars and others being empty.\n\n```json\n{\"perception\": [[\"s\", \"*\", \"*\", \"*\", \"*\"], \n [\"*\", \"*\", \"s\", \"*\", \"*\"], \n [\"*\", \"s\", \"*\", \"s\", \"*\"], \n [\"*\", \"*\", \"*\", \"*\", \"*\"]]}\n```\n\n**Thinking Process**\n\nTo solve this puzzle, we need to place stars in each row, column, and region such that no two stars touch each other, not even diagonally. We can start by identifying the regions where we can place the stars:\n\n* Region 1: Top-left 2x2 box\n* Region 2: Top-right 2x2 box\n* Region 3: Middle 2x2 box\n* Region 4: Bottom 2x2 box\n\nWe can also observe that the first column has only one star, so we need to place the remaining three stars in the other columns.\n\n**Solution**\n\nBased on the above analysis, here's our solution:\n\n```json\n{\"answer\": [[\"s\", \"e\", \"e\", \"e\", \"e\"], \n [\"e\", \"s\", \"e\", \"e\", \"e\"], \n [\"e\", \"e\", \"s\", \"e\", \"s\"], \n [\"e\", \"e\", \"e\", \"e\", \"s\"]]}\n```\n\nIn this solution, we have placed stars in each row, column, and region, and made sure that no two stars touch each other." + }, + { + "role": "assistant", + "content": "Initial State\n[[s,0,0,0,0],[0,0,s,0,0],[0,s,0,s,0],[0,0,0,0,0]]\nSolution\n[[s,0,0,0,0],[0,s,0,0,0],[0,0,s,0,s],[0,0,0,0,s]]" + }, + { + "role": "user", + "content": "Yes." 
+ } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/sudoku/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/sudoku/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..228fa6bfe1a083c77deb93632cb770eda7c4bbd2 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/sudoku/filter_prompt.json @@ -0,0 +1,66 @@ +[ + { + "role": "system", + "content": "You are a text formatter, your goal is to extract two parts, perception and answer, from the response and put them into a good format. Please format according to the conversation history and the user feedback. The desired format is a 2d array, where there are only two types of values: 0 or numbers; if you see * or _ or - or e or #, you should convert them into 0. If you see other numbers such as 1, 2, or 3, you just put it there. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not." + }, + { + "role": "user", + "content": "json\n{\n \"Initial State\": [[1, 2, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", 1, \"*\"], [\"*\", \"*\", \"*\", \"*\"]],\n \"Thought\": \"To solve this Sudoku puzzle, I need to fill in the empty cells ensuring that each number from 1 to 4 appears exactly once in each row, column, and 2x2 block. I will start by identifying the possible numbers for each empty cell based on the existing ones.\",\n \"Solution\": [[1, 2, 4, 3], [2, 3, 4, 1], [3, 4, 1, 2], [4, 1, 2, 4]]\n}\n" + }, + { + "role": "assistant", + "content": "Perception\n[[1,2,0,0],[2,0,0,0],[0,0,1,0],[0,0,0,0]]\nAnswer\n[[1,2,4,3],[2,3,4,1],[3,4,1,2],[4,1,2,4]]" + }, + { + "role": "user", + "content": "Yes. 
" + }, + { + "role": "user", + "content": "json\n{\n \"Initial State\": [[4, 1, \"*\", \"*\"], [2, \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", 1]],\n \"Thought\": \"The grid consists of a 4x4 layout divided into 2x2 blocks. Considering existing numbers, I will analyze which numbers can fit into the empty cells while ensuring each row, column, and 2x2 block adhere to Sudoku rules. I will start filling from top left to bottom right.\",\n \"Solution\": [[4, 1, 3, 2], [2, 3, 1, 4], [1, 4, 2, 3], [3, 2, 4, 1]]\n}\n" + }, + { + "role": "assistant", + "content": "Perception\n[[4,1,0,0],[2,0,0,0],[0,0,0,0],[0,0,0,1]]\nAnswer\n[[4,1,3,2],[2,3,1,4],[1,4,2,3],[3,2,4,1]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n\"Initial State\": [\n[[\"*\"], [\"*\"], [\"*\"], [\"4\", \"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"1\"]], \n[[\"*\"], [\"*\"], [\"*\"], [\"*\"]], \n[[\"1\"], [\"*\"], [\"*\"], [\"3\"]]\n],\n\n\"Thought\": \"The grid is divided into 2x2 blocks. We need to ensure each block, row, and column contains unique numbers from 1 to 4. The current numbers are 1 and 3 in the top-right block and 4 and 1 in the top-left block. We need to fill the remaining cells with numbers 2 and 4 to make sure each block, row, and column has unique numbers. We can start with placing 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"Considering the existing numbers and the rule that each block, row, and column must contain unique numbers, we can place the numbers in the remaining cells. The numbers 2 and 4 are available to fill the cells. We need to ensure that each of these numbers is placed in a way that does not violate the Sudoku rules. 
For example, we can place 2 in the bottom-left cell of the top-right block and 4 in the bottom-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can place the number 2. In the bottom-right cell of the top-left block, we can place the number 4. Now, we need to fill the remaining cells in the bottom-left block of the top-right block and the top-right cell of the top-left block with the remaining numbers 1 and 3.\",\n\"Solution\": {\n\"Thought\": \"We can place the number 1 in the bottom-left cell of the top-right block and the number 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"The solution is to place the numbers as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block.\",\n\"Solution\": {\n\"Thought\": \"The solution to the Sudoku puzzle is as follows: 2 in the bottom-left cell of the top-right block, 4 in the bottom-right cell of the top-left block, 1 in the bottom-left cell of the top-right block, and 3 in the top-right cell of the top-left block. This will ensure that each block, row, and column contains unique numbers from 1 to 4.\",\n\"Solution\": {\n\"Thought\": \"In the bottom-left cell of the top-right block, we can\n``" + }, + { + "role": "assistant", + "content": "Perception\nWrong\nAnswer\nWrong" + }, + { + "role": "user", + "content": "Yes. as perception is not a 2D array and answer is missing. For perception, its first row is 3 single item and 1 2-item array, not valid. Thus, putting Wrong for both perception and answer." 
+ }, + { + "role": "user", + "content": "``\n{\n \"Initial State\": [\n [3, \"*\", 2, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"],\n [\"*\", 2, 3, \"*\"],\n [\"*\", \"*\", \"*\", \"*\"]\n ],\n \"Thought\": \"The given Sudoku puzzle has some numbers already filled in. We need to fill in the missing numbers such that each row, column, and 2x2 sub-grid contains the numbers 1-4 without repetition.\",\n \"Solution\": [\n [3, 1, 2, 4],\n [4, 3, 1, 2],\n [1, 2, 3, 4],\n [2, 4, 4, 1]\n ]\n}\n```\n\nThe solution is obtained by applying the standard Sudoku solving techniques, such as focusing on the most constrained regions first and using logical deductions to eliminate possibilities." + }, + { + "role": "assistant", + "content": "Perception\n[[3,0,2,0],[0,0,0,0],[0,2,3,0],[0,0,0,0]]\nAnswer\n[[3,1,2,4],[4,3,1,2],[1,2,3,4],[2,4,4,1]]" + }, + { + "role": "user", + "content": "Yes. ." + }, + { + "role": "user", + "content": "```json\n{\"perception\": [[3, 1, \"*\", 4], [2, 4, 1, 3], [\"*\", \"*\", 4, \"*\"], [4, \"*\", 3, \"*\"]], \"think\": \"Looking at the first row, the only missing number is 2. \\nLooking at the last row, the missing numbers are 1 and 2. Since the cell at index (3,1) is of the same color as the cell at index (1,1) which contains the number 4, and the cell at index (3,0) contains the number 4, the cell at index (3,1) can only be 1 or 2. Since the cell at index (3,0) already contains 4, the cell at index (3,1) must be 1. Then the cell at index (3,3) must be 2. \\nLooking at the third row, the missing numbers are 1, 2, and 3. Since the cell at index (2,3) is of the same color as the cell at index (0,3) which contains 4, and the cell at index (1,3) contains 3, and the cell at index (2,2) contains 4, the cell at index (2,3) must be 1 or 2. Since the cell at index (3,3) contains 2, the cell at index (2,3) must be 1. Then the cell at index (2,0) must be 3, and the cell at index (2,1) must be 2. 
\\nTherefore, the solution is [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]].\", \"answer\": [[3, 1, 2, 4], [2, 4, 1, 3], [3, 2, 4, 1], [4, 1, 3, 2]]}\n```" + }, + { + "role": "assistant", + "content": "Perception\n[[3,1,0,4],[2,4,1,3],[0,0,4,0],[4,0,3,0]]\nAnswer\n[[3,1,2,4],[2,4,1,3],[3,2,4,1],[4,1,3,2]]" + }, + { + "role": "user", + "content": "Yes. ." + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/thermometers/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/thermometers/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..1c0b69400ad5881246da1e35c4bfb26e48c07176 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/thermometers/filter_prompt.json @@ -0,0 +1,41 @@ +[ + { + "role": "system", + "content": "You are a text formatter, your goal is to extract two parts, perception and answer, from the response and put them into a good format. Please format according to the conversation history and the user feedback. The desired format is a 2d array, where there is only 0 or s; *, or 0, or _, or -, or e might indicate empty, which you should convert to 0. If the perception / answer is not a valid solution or does not exist, please put a Wrong there. Your goal is only to format the user's input, not to check if it is correct or not."}, + { + "role": "user", + "content": "{\n\"perception\": \"***s*\\n*s***\\n*s*s*\\n**ss*\",\n\"think\": \"To solve this Thermometers puzzle, I need to fill the cells in a way that creates continuous thermometers from the bulbs to the top, without any gaps, and matches the given row and column constraints. 
I'll analyze each row and column to determine the required filled cells.\",\n\"answer\": \"ssss*\\nssss*\\n*ssss\\n**sse\"\n}" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,s,0],[0,s,0,0,0],[0,s,0,s,0],[0,0,s,s,0]]\nSolution\n[[s,s,s,s,0],[s,s,s,s,0],[0,s,s,s,s],[0,0,s,s,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": \"{{0,0:s},{0,0:s},{0,1:s},{0,1:s},{0,2:s},{0,3:s},{1,0:s},{1,0:s},{1,0:s},{1,0:s},{1,2:s},{1,3:s},{1,3:s},{2,0:s},{2,0:s},{2,1:s},{2,1:s},{2,2:s},{2,2:s},{2,3:s},{2,3:s},{3,0:s},{3,0:s},{3,1:s},{3,1:s},{3,2:s},{3,2:s},{3,3:s},{3,3:s}}\",\n \"think\": \"The puzzle is a Thermometers puzzle. The numbers on the sides indicate how many filled cells must be in each row and column. The thermometers must be filled from their bulb (start) to their top, without gaps. I will start by filling the cells that are already partially filled. Then, I will look at the rows and columns to see if there are any cells that must be filled based on the numbers on the sides. I will continue this process until all of the thermometers are filled.\",\n \"answer\": \"{{0,0:s},{0,0:s},{0,1:s},{0,1:s},{0,2:s},{0,3:s},{1,0:s},{1,0:s},{1,0:s},{1,0:s},{1,2:s},{1,3:s},{1,3:s},{2,0:s},{2,0:s},{2,1:s},{2,1:s},{2,2:s},{2,2:s},{2,3:s},{2,3:s},{3,0:s},{3,0:s},{3,1:s},{3,1:s},{3,2:s},{3,2:s},{3,3:s},{3,3:s}}\"\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\nWrong" + }, + { + "role": "user", + "content": "Yes, as you cannot extract perception and answer as a 2d array, you have to mark it as Wrong. " + }, + { + "role": "user", + "content": "{\n\"perception\": \"Row 0: * e * s | Row 1: s e e e | Row 2: * e * s | Row 3: * e * *\",\n\"think\": \"First, check the filled cells needed for each row and column. Row 1 requires all cells filled since it has 4 spaces and needs 4 filled. Row 0 requires 2 more filled cells, and Row 3 requires all 4 cells filled. 
Column 1 is already fully filled. Proceed to fill the thermometers ensuring continuity without exceeding the required number of filled spaces per row and column.\",\n\"answer\": \"Row 0: s e * s | Row 1: s s s s | Row 2: s e s e | Row 3: s s s s\"\n}" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,s],[s,0,0,0],[0,0,0,s],[0,0,0,0]]\nSolution\n[[s,0,0,s],[s,s,s,s],[s,0,0,0],[s,s,s,s]]" + }, + { + "role": "user", + "content": "Yes, though the input is not an array, but the message is clear. " + } +] \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/treesandtents/filter_prompt.json b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/treesandtents/filter_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..caa95fc684b791aa8797f26feb00a3b6af1e2809 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/configs/formating-prompt/treesandtents/filter_prompt.json @@ -0,0 +1,79 @@ +[ + { + "role": "system", + "content": "You are a text formatter, your goal is to extract two parts, perception and answer, from the response and put them into a good format. Please format according to the conversation history and the user feedback. The desired format is a 2d array, where there are only the three characters 0, r, and t; *, or 0, or _, or -, or e might indicate empty, which you should convert to 0. tr means tree, and should be converted to r. tt means tent, and should be converted to t. It is possible that perception and / or answer is missing or invalid, you just put a Wrong for the wrong / both. 
Your goal is only to format the user's input, not to check if it is correct or not."}, + { + "role": "user", + "content": "```json\n{\n\"perception\": [\n [\"*\", \"*\", \"*\", \"tr\", \"tr\"],\n [\"*\", \"*\", \"*\", \"*\", \"*\"],\n [\"*\", \"tr\", \"*\", \"*\", \"*\"],\n [\"*\", \"*\", \"tr\", \"*\", \"*\"],\n [\"tr\", \"tr\", \"*\", \"*\", \"*\"]\n],\n\"answer\": [\n [\"e\", \"e\", \"e\", \"tt\", \"tt\"],\n [\"e\", \"e\", \"e\", \"e\", \"e\"],\n [\"e\", \"tt\", \"e\", \"e\", \"e\"],\n [\"e\", \"e\", \"tt\", \"e\", \"e\"],\n [\"tt\", \"tt\", \"e\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,r,r],[0,0,0,0,0],[0,r,0,0,0],[0,0,r,0,0],[r,r,0,0,0]]\nSolution\n[[0,0,0,t,t],[0,0,0,0,0],[0,t,0,0,0],[0,0,t,0,0],[t,t,0,0,0]]" + }, + { + "role": "user", + "content": "Yes. " + }, + { + "role": "user", + "content": "```json\n{\n \"perception\": \"[[e, e, e, tr, tr], [e, e, e, e, e], [e, tr, e, e, e], [e, e, tr, e, e], [tt, tr, e, e, e]]\",\n \"think\": \"The constraints are that each tree must have exactly one tent next to it, horizontally or vertically. Tents cannot be adjacent to each other. Looking at the bottom row, there's a tent and a tree. The tree at (4,1) needs a tent. The only place to put it is (3,1). Then, the tree at (2,1) needs a tent, and the only place is (2,0). The tree at (1,4) needs a tent, and the only place is (1,3). The tree at (0,4) needs a tent, and the only place is (0,3). 
This satisfies all constraints.\",\n \"answer\": \"[[e, e, e, tt, tt], [e, e, e, e, e], [e, tt, e, e, e], [e, e, tr, e, e], [tt, tr, e, e, e]]\"\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[0,0,0,r,r],[0,0,0,0,0],[0,r,0,0,0],[0,0,r,0,0],[t,r,0,0,0]]\nSolution\n[[0,0,0,t,t],[0,0,0,0,0],[0,t,0,0,0],[0,0,r,0,0],[t,r,0,0,0]]" + }, + { + "role": "user", + "content": "Yes " + }, + { + "role": "user", + "content": "```json\n{\n\"perception\": [[\"tt\", \"*\", \"*\", \"*\", \"tr\", \"tr\"], [\"*\", \"*\", \"*\", \"*\", \"*\", \"*\"], [\"*\", \"tr\", \"*\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"tr\", \"*\", \"*\", \"*\"], [\"*\", \"*\", \"*\", \"*\", \"*\", \"*\"], [\"*\", \"tr\", \"*\", \"*\", \"*\", \"*\"]],\n\"answer\": [[\"tt\", \"e\", \"e\", \"e\", \"tr\", \"tr\"], [\"e\", \"e\", \"e\", \"tt\", \"e\", \"tt\"], [\"tt\", \"tr\", \"e\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"tr\", \"e\", \"e\", \"e\"], [\"e\", \"tt\", \"e\", \"e\", \"e\", \"e\"], [\"tt\", \"tr\", \"e\", \"e\", \"e\", \"e\"]]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\n[[t,0,0,0,r,r],[0,0,0,0,0,0],[0,r,0,0,0,0],[0,0,r,0,0,0],[0,0,0,0,0,0],[0,r,0,0,0,0]]\nSolution\n[[t,0,0,0,r,r],[0,0,0,t,0,t],[t,r,0,0,0,0],[0,0,r,0,0,0],[0,t,0,0,0,0],[t,r,0,0,0,0]]" + }, + { + "role": "user", + "content": "Yes, though the input is not an array, but the message is clear. " + }, + { + "role": "user", + "content": "```\n{\n \"perception\": \"The current state of the game is as follows:\n 2 trees in the top left,\n 1 tree in the top right,\n 4 trees in the bottom left,\n 1 tree in the bottom right,\n 1 tent in the bottom left.\n\n The numbers on the sides indicate that there should be 2 tents in the first row,\n 2 tents in the second row,\n 1 tent in the third row,\n 0 tents in the fourth row.\",\n \"think\": \"To find a valid solution, we need to place tents such that each tree is paired with exactly one tent. 
Tents cannot touch each other, even diagonally.\n \n Let's start by placing tents next to the trees in the bottom left corner. Since there are 4 trees, we can place 4 tents.\n \n Next, we can place the tent next to the tree in the top right. Since there is only one tree in this column, we can place one tent here.\n \n Now, we need to place tents in the second row. We have already placed one tent in the bottom left. Since there are 2 trees in this row, we need to place 2 more tents in this row. We can place them next to the trees that are not paired yet.\n \n Finally, we need to place the tent in the third row next to the tree that is not paired yet.\n \n Since there are no more trees to pair with tents, and we have already placed the required number of tents in each row, our solution is complete.\",\n \"answer\": \"trtrtrtrtt**\n 11e**\n 11e**\n 0000\"\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\nWrong" + }, + { + "role": "user", + "content": "Yes, as the perception is far from a 2d array, and the output contains unrecoginizable values 1, so you just put a Wrong" + }, + { + "role": "user", + "content": "```json\n{\n\"think\": \"To solve this Trees and Tents puzzle, I need to place tents on the grid such that each tree is adjacent to exactly one tent, and no tents touch each other, even diagonally. 
The row and column clues provide the number of tents that must be placed in each row and column, respectively.\n\nI'll start by analyzing the given clues:\nRow clues: [1, 0, 2, 0, 2]\n- Row 0 has 1 tent\n- Row 1 has no tents\n- Row 2 has 2 tents\n- Row 3 has no tents\n- Row 4 has 2 tents\n\nColumn clues: [0, 1, 2, 1, 1]\n- Column 0 has no tents\n- Column 1 has 1 tent\n- Column 2 has 2 tents\n- Column 3 has 1 tent\n- Column 4 has 1 tent\n\nNext, I'll place the tents based on the given board state and clues:\n- Row 0: The only possible position for the tent is (0, 2) since it's the only empty cell.\n- Row 1: No tents need to be placed.\n- Row 2: The two tents must be placed at (2, 1) and (2, 3) to be adjacent to the trees.\n- Row 3: No tents need to be placed.\n- Row 4: The two tents must be placed at (4, 0) and (4, 2) to be adjacent to the trees and satisfy the column clues.\n\nThe final solution is:\n\",\n\"answer\": [\n [\"e\", \"e\", \"tt\", \"tr\", \"e\"],\n [\"e\", \"e\", \"e\", \"e\", \"tr\"],\n [\"e\", \"tr\", \"tt\", \"tt\", \"e\"],\n [\"e\", \"e\", \"e\", \"tr\", \"e\"],\n [\"tr\", \"tt\", \"tt\", \"e\", \"e\"]\n]\n}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[0,0,t,r,0],[0,0,0,0,r],[0,r,t,t,0],[0,0,0,r,0],[r,t,t,0,0]]" + }, + { + "role": "user", + "content": "Yes, perception is missing so you put a Wrong, but the solution is valid." + }, + + { + "role": "user", + "content": "```json\n{\"answer\": [[\"e\", \"e\", \"tt\", \"tr\", \"e\"], [\"e\", \"e\", \"e\", \"e\", \"tr\"], [\"e\", \"tr\", \"e\", \"e\", \"e\"], [\"e\", \"e\", \"e\", \"tr\", \"e\"], [\"tr\", \"tt\", \"e\", \"e\", \"e\"]]}\n```" + }, + { + "role": "assistant", + "content": "Initial State\nWrong\nSolution\n[[0,0,t,r,0],[0,0,0,0,r],[0,r,0,0,0],[0,0,0,r,0],[r,t,0,0,0]]" + }, + { + "role": "user", + "content": "Yes, perception is missing so you put a Wrong, but the solution is valid." 
class ConstraintRowBalance(Constraint):
    """Every completed row must contain equally many 'w' and 'b' cells."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_row_balance"

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Return True while every finished row holds size//2 of each colour.

        Rows that still contain a 0 (undecided) cell are not judged yet.
        """
        board = game_state["board"]
        size = len(board)
        expected_count = size // 2

        # Undecided cells must be encoded as 0, never as '*'.
        assert all(all(cell != '*' for cell in row) for row in board), "'*' should be replaced by '0' in the initialization board"

        for row in board:
            if 0 in row:
                continue  # row not complete yet, skip it
            whites = row.count('w')
            blacks = row.count('b')
            if whites != expected_count or blacks != expected_count:
                return False
        return True
class BinairoPuzzleFactory(PuzzleFactory):
    """Factory for Binairo boards: balanced rows/columns, no triple runs."""

    def __init__(self, size: int) -> None:
        super().__init__()
        # Binairo needs an even side so rows/columns can be split 50/50.
        if size < 4 or size % 2 != 0:
            raise ValueError("Size must be an even number greater than or equal to 4")
        self.game_name = "binairo"
        self.size = size
        self.constraints = [
            ConstraintRowBalance(),
            ConstraintColBalance(),
            ConstraintNoTripleAdjacent(),
            # ConstraintUniqueLines()
        ]
        self.all_possible_values = ['w', 'b']  # 'w' for white, 'b' for black

    def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]:
        """Try each colour at (row, col); keep those passing every constraint.

        The cell is written in place and restored before returning.
        """
        board = game_state["board"]
        saved = board[row][col]
        allowed = []
        for candidate in self.all_possible_values:
            board[row][col] = candidate
            if self.check(game_state):
                allowed.append(candidate)
        board[row][col] = saved
        return allowed
class ConstraintColorNoRepeat(Constraint):
    """No two filled cells sharing a colour may hold the same value."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_color_no_repeat"

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Group filled cell values by colour; fail on any duplicate."""
        board = game_state["board"]
        colors = game_state.get("colors", None)
        if colors is None:
            # Without a colour map there is nothing to enforce.
            return True

        values_by_color: Dict[Any, list] = {}
        for r in range(len(board)):
            for c in range(len(board[0])):
                bucket = values_by_color.setdefault(colors[r][c], [])
                if board[r][c] != 0:  # 0 marks an empty cell
                    bucket.append(board[r][c])

        return all(len(vals) == len(set(vals)) for vals in values_by_color.values())
class Constraint():
    """Base class for a single puzzle rule; subclasses override check()."""

    def __init__(self) -> None:
        # Human-readable identifier, filled in by subclasses.
        self.name = ""

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Return True when *game_state* does not violate this rule."""
        pass


class ConstraintRowNoRepeat(Constraint):
    """No value may appear twice within any single row (0 = empty)."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_row_no_repeat"

    def check(self, game_state: Dict[str, Any]) -> bool:
        for row in game_state["board"]:
            filled = [cell for cell in row if cell != 0]
            if len(filled) != len(set(filled)):
                return False
        return True


class ConstraintColNoRepeat(Constraint):
    """No value may appear twice within any single column (0 = empty)."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_col_no_repeat"

    def check(self, game_state: Dict[str, Any]) -> bool:
        board = game_state["board"]
        for col in range(len(board[0])):
            filled = [board[row][col] for row in range(len(board)) if board[row][col] != 0]
            if len(filled) != len(set(filled)):
                return False
        return True


class ConstraintSubGridNoRepeat(Constraint):
    """No value may repeat inside any sqrt(n) x sqrt(n) sub-grid."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_sub_grid_no_repeat"

    def check(self, game_state: Dict[str, Any]) -> bool:
        board = game_state["board"]
        assert len(board) == len(board[0]), "board is not square"
        assert len(board) in [4, 9], "board size is not 4 or 9"

        step = int(len(board) ** 0.5)
        for top in range(0, len(board), step):
            for left in range(0, len(board[0]), step):
                filled = [
                    board[x][y]
                    for x in range(top, top + step)
                    for y in range(left, left + step)
                    if board[x][y] != 0
                ]
                if len(filled) != len(set(filled)):
                    return False
        return True
def get_prompt(game_type: str, thinking_format: str) -> str:
    """Return the system prompt for a given puzzle type.

    Args:
        game_type: Name of the puzzle module (e.g. "sudoku", "binairo").
        thinking_format: "direct_solution" selects the answer-only prompt;
            any other value selects the chain-of-thought ("cot") prompt,
            matching the original if/else behaviour.

    Returns:
        The prompt string taken from the puzzle module's SYSTEM_PROMPT dict.

    Raises:
        ValueError: If *game_type* is not a known puzzle.
    """
    import importlib

    # One entry per sibling puzzle module (the original if/elif chain listed
    # "sudoku" twice; the duplicate is collapsed here).
    supported = {
        "sudoku", "coloredsudoku", "binairo", "futoshiki", "hitori",
        "kakuro", "killersudoku", "renzoku", "skyscraper", "starbattle",
        "treesandtents", "thermometers", "kakurasu", "aquarium",
        "oddevensudoku", "battleships", "fieldexplore", "jigsawsudoku",
        "nonogram", "lightup",
    }
    if game_type not in supported:
        raise ValueError(f"Unknown game type: {game_type}")

    # Bugfix: the original used absolute imports ("from puzzles.X import
    # SYSTEM_PROMPT"), which fail inside this vendored package where every
    # sibling module is imported relatively. Import the sibling relatively.
    module = importlib.import_module(f".{game_type}", package=__package__)
    system_prompt = module.SYSTEM_PROMPT

    if thinking_format == "direct_solution":
        return system_prompt["direct_solution"]
    return system_prompt["cot"]
class ConstraintAdjacentNumbers(Constraint):
    """Ensures revealed numbers match adjacent mine counts.

    A revealed clue N at (i, j) stays satisfiable while the count of
    adjacent mines ('s') is at most N and mines plus still-undecided
    cells (0) is at least N.
    """

    def __init__(self) -> None:
        # Added for consistency with the other constraint classes, which all
        # set a name; the original class had no __init__.
        super().__init__()
        self.name = "constraint_adjacent_numbers"

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Return True while every revealed clue can still be satisfied.

        Note: the original signature annotated game_state as
        List[List[Any]], but the body reads game_state["board"], so the
        parameter is in fact the game-state dict.
        """
        board = game_state["board"]
        size = len(board)
        for i in range(size):
            for j in range(size):
                # A revealed clue is stored as a non-zero int (0 = undecided).
                if not (isinstance(board[i][j], int) and board[i][j] != 0):
                    continue
                # 3x3 neighbourhood clipped to the board (includes (i, j)
                # itself, which is an int clue and counts as neither).
                neighbors = [
                    board[r][c]
                    for r in range(max(0, i - 1), min(size, i + 2))
                    for c in range(max(0, j - 1), min(size, j + 2))
                ]
                mines = neighbors.count('s')
                undecided = neighbors.count(0)
                # Violated if definite mines already exceed the clue, or if
                # even turning every undecided neighbour into a mine cannot
                # reach it.
                if mines > board[i][j] or mines + undecided < board[i][j]:
                    return False
        return True
class ConstraintColNoRepeat(Constraint):
    """Rejects boards where a column contains the same value twice."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_col_no_repeat"

    def check(self, game_state: Dict[str, Any]) -> bool:
        """Scan each column once, tracking seen values (0 = empty cell)."""
        board = game_state["board"]
        size = len(board)
        for col in range(size):
            seen = set()
            for row in range(size):
                value = board[row][col]
                if value == 0:
                    continue  # empty cells never clash
                if value in seen:
                    return False
                seen.add(value)
        return True
class FutoshikiPuzzleFactory(PuzzleFactory):
    """Factory for Futoshiki boards of side length *size* (3..12)."""

    def __init__(self, size: int) -> None:
        super().__init__()
        # Bugfix: the guard accepts sizes up to 12 but the message said
        # "between 3 and 9"; the message now matches the actual check.
        if size < 3 or size > 12:
            raise ValueError("Grid size must be between 3 and 12")
        self.game_name = "futoshiki"
        self.size = size
        self.constraints = [
            ConstraintRowNoRepeat(),
            ConstraintColNoRepeat(),
            ConstraintInequality(),
        ]
        # Cell values are 1..size inclusive.
        self.all_possible_values = list(range(1, size + 1))

    def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]:
        """Return every value that keeps the board consistent at (row, col).

        Each candidate is written into the cell, all constraints are
        re-checked, and the original cell value is restored before return.
        """
        board = game_state["board"]
        original_value = board[row][col]
        possible_values = []
        for value in self.all_possible_values:
            board[row][col] = value
            if self.check(game_state):
                possible_values.append(value)
        board[row][col] = original_value
        return possible_values
class ConstraintHitoriConnected(Constraint):
    """All unshaded ('e') and undecided (0) cells must form one connected area."""

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_hitori_connected"

    def check(self, game_state: Dict[str, Any]) -> bool:
        """BFS from the first open cell; True iff it reaches every open cell.

        Returns False when the board has no unshaded/undecided cell at all
        (same as the original). Performance fix: the original BFS used
        list.pop(0), which is O(n) per dequeue (O(n^2) overall); a deque
        gives O(1) popleft with identical visiting behaviour.
        """
        from collections import deque  # local import: module deps unchanged

        board = game_state["board"]
        size = len(board)

        def open_cell(r: int, c: int) -> bool:
            # 'e' = unshaded, 0 = not yet decided; both are traversable.
            return board[r][c] in ("e", 0)

        # First open cell in row-major order, as in the original scan.
        start = next(
            ((r, c) for r in range(size) for c in range(size) if open_cell(r, c)),
            None,
        )
        if start is None:
            return False

        visited = [[False] * size for _ in range(size)]
        visited[start[0]][start[1]] = True
        queue = deque([start])
        while queue:
            r, c = queue.popleft()
            for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                nr, nc = r + dr, c + dc
                if (0 <= nr < size and 0 <= nc < size
                        and not visited[nr][nc] and open_cell(nr, nc)):
                    visited[nr][nc] = True
                    queue.append((nr, nc))

        # Every open cell must have been reached.
        return all(
            visited[r][c]
            for r in range(size)
            for c in range(size)
            if open_cell(r, c)
        )
class JigsawSudokuPuzzleFactory(PuzzleFactory):
    """Factory for jigsaw sudoku: rows, columns and irregular regions unique."""

    def __init__(self, size: int) -> None:
        super().__init__()
        self.game_name = "jigsawsudoku"
        self.size = size
        self.constraints = [
            ConstraintRowNoRepeat(),
            ConstraintColNoRepeat(),
            ConstraintRegionNoRepeat(),
        ]
        self.all_possible_values = list(range(1, size + 1))
        self.cached_region_splits = []

    def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]:
        """Values that keep (row, col) consistent; [] for a pre-filled cell.

        Each candidate is written into the cell, every constraint is
        re-checked, and the cell is restored before the next candidate.
        """
        board = game_state["board"]
        if board[row][col] != 0:
            return []

        original_value = board[row][col]
        candidates = []
        for value in self.all_possible_values:
            board[row][col] = value
            if all(constraint.check(game_state) for constraint in self.constraints):
                candidates.append(value)
            board[row][col] = original_value
        return candidates
class ConstraintLightUpWall(Constraint):
    """Numbered walls must be satisfiable by their adjacent light bulbs.

    Fails when a numbered wall already has more orthogonally adjacent
    bulbs than its number, or when current bulbs plus still-undefined
    neighbours can no longer reach the required number.
    """

    def __init__(self) -> None:
        super().__init__()
        self.name = "constraint_lightup_wall"

    def check(self, game_state: Dict[str, Any]) -> bool:
        board = game_state["board"]
        wall_numbers = game_state["wall_numbers"]
        if not wall_numbers:
            return True

        size = len(board)
        for r in range(size):
            for c in range(size):
                target = wall_numbers[r][c]
                # Only numbered walls constrain anything (-1 = unnumbered).
                if board[r][c] != 'w' or target == -1:
                    continue
                bulbs = 0
                undecided = 0
                for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    nr, nc = r + dr, c + dc
                    if not (0 <= nr < size and 0 <= nc < size):
                        continue
                    if board[nr][nc] == 's':
                        bulbs += 1
                    elif board[nr][nc] == 0:  # still-undefined neighbour
                        undecided += 1
                # Too many definite bulbs, or not enough potential ones.
                if bulbs > target or bulbs + undecided < target:
                    return False
        return True
class LightUpPuzzleFactory(PuzzleFactory):
    """Factory for Light Up (Akari) boards of side length *size* (>= 3)."""

    def __init__(self, size: int) -> None:
        super().__init__()
        if size < 3:
            raise ValueError("Size must be at least 3")

        self.game_name = "lightup"
        self.size = size

        self.constraints = [
            ConstraintLightUpBulb(),
            ConstraintLightUpWall(),
            ConstraintLightUpIllumination(),
        ]

        # 's' places a light source, 'e' marks a deliberately empty cell.
        self.all_possible_values = ['s', 'e']

    def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[str]:
        """Candidate values for (row, col); walls can never take a value.

        Bugfix: the wall guard used to test ``board[row][col] in
        [-1, 1, 2, 3, 4]``, which never matches because walls are stored
        as 'w' on the board (ConstraintLightUpBulb checks ``== 'w'``) and
        their numbers live in ``wall_numbers`` — so bulbs could be
        proposed on wall cells. The return annotation is also corrected
        from List[int]: candidates are the strings 's'/'e'.
        """
        board = game_state["board"]
        if board[row][col] == 'w':  # walls (numbered or not) hold no value
            return []

        possible_values = []
        original_value = board[row][col]
        for value in self.all_possible_values:
            board[row][col] = value
            if self.check(game_state):
                possible_values.append(value)
        board[row][col] = original_value
        return possible_values
class ConstraintBase:
    """Shared run-matching helper for the nonogram row/column constraints."""

    def _check_line_hints(self, line: List[Any], hints: List[int]) -> bool:
        """Return True while *line* can still satisfy *hints*.

        Cells: 's' = filled, 'e' = empty, 0 = undefined/undecided.
        A fully defined line must match the hints exactly; a partially
        defined line is only rejected on provably impossible states
        (too few fillable cells, or more completed runs than hints).
        """
        # Convert line to runs of filled cells ('s' = filled, 'e' = empty, 0 = undefined)
        runs = []  # Will store lengths of consecutive filled cells
        count = 0  # Counter for current run length
        current_run = []  # Track cells in current run for debugging/future use

        if hints == [0]:
            # the line should not contain 's'
            return line.count('s') == 0

        # First pass: Calculate runs of filled cells
        # NOTE(review): an undefined cell (0) does NOT terminate a run here,
        # so runs may span undefined cells; these runs are only compared
        # against hints in the fully-defined branch below, where no 0 exists.
        for cell in line:
            if cell == "s":  # Found a filled cell
                count += 1
                current_run.append(cell)
            elif cell == "e":  # Found an empty cell
                if count > 0:  # If we were counting a run
                    runs.append(count)
                    count = 0
                    current_run = []
            else:  # cell is 0 (undefined)
                if count > 0:
                    current_run.append(cell)
        # Don't forget to add the last run if it exists
        if count > 0:
            runs.append(count)
        # Calculate cell statistics
        filled_cells = line.count("s")  # Number of definitely filled cells
        undefined_cells = line.count(0)  # Number of cells yet to be determined
        required_cells = sum(hints)  # Total number of cells that should be filled according to hints

        # Early failure: Check if we have enough cells to satisfy hints
        if filled_cells + undefined_cells < required_cells:
            return False

        # For completely defined lines (no undefined cells)
        if undefined_cells == 0:
            # Simple comparison: runs must exactly match hints
            if runs != hints:
                return False
        else:
            # For partially defined lines, check if current definite runs are valid
            definite_runs = []
            count = 0
            # Calculate runs that are definitely complete (bounded by empty cells or edges)
            # NOTE(review): the break fires only when a run is terminated by an
            # undefined cell; leading/isolated undefined cells with count == 0
            # do not stop the scan.
            for cell in line:
                if cell == "s":
                    count += 1
                elif (cell == "e" or cell == 0) and count > 0:
                    definite_runs.append(count)
                    count = 0
                    if cell == 0:  # Stop at first undefined cell
                        break
            if count > 0:
                definite_runs.append(count)
            # Validate the definite runs we've found
            if definite_runs:
                # Can't have more runs than hints
                if len(definite_runs) > len(hints):
                    return False
                # FIXME: Additional validation commented out
                # Check if any run is longer than corresponding hint
                # if any(definite_runs[j] > hints[j] for j in range(len(definite_runs))):
                #     return False
        return True
board[row][col] != 0: # If cell is already filled + return [] + + possible_values = [] + original_value = board[row][col] + + for value in self.all_possible_values: + board[row][col] = value + if self.check(game_state): + possible_values.append(value) + board[row][col] = original_value + return possible_values diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/oddevensudoku.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/oddevensudoku.py new file mode 100644 index 0000000000000000000000000000000000000000..29f855a901deafe0a433be6ebea02ae9c6164e43 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/oddevensudoku.py @@ -0,0 +1,117 @@ +from typing import List, Tuple, Union, Dict, Any +import random +import copy +import os +import json + +from .common_puzzle_factory import PuzzleFactory +from .common_constriants import Constraint + +class ConstraintOddEven(Constraint): + def __init__(self) -> None: + super().__init__() + self.name = "constraint_odd_even" + + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + cell_types = game_state.get("cell_types", None) + + # If no cell types are specified, skip this constraint + if cell_types is None: + return True + + for i in range(len(board)): + for j in range(len(board[0])): + if board[i][j] != 0: # Skip empty cells + is_even = board[i][j] % 2 == 0 + if (cell_types[i][j] == 'w' and not is_even) or \ + (cell_types[i][j] == 'b' and is_even): + return False + return True + +class ConstraintRowNoRepeat(Constraint): + def __init__(self) -> None: + super().__init__() + self.name = "constraint_row_no_repeat" + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + for row in board: + # Create a list of non-zero values in the row + values = [x for x in row if x != 0] + # Check if there are any duplicates + if len(values) != len(set(values)): + return False + return True + +class ConstraintColNoRepeat(Constraint): + 
def __init__(self) -> None: + super().__init__() + self.name = "constraint_col_no_repeat" + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + for col in range(size): + # Create a list of non-zero values in the column + values = [board[row][col] for row in range(size) if board[row][col] != 0] + # Check if there are any duplicates + if len(values) != len(set(values)): + return False + return True + +class ConstraintSubGridNoRepeat(Constraint): + def __init__(self) -> None: + super().__init__() + self.name = "constraint_sub_grid_no_repeat" + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + sub_size = int(size ** 0.5) # Size of sub-grid (2 for 4x4, 3 for 9x9) + # Check each sub-grid + for box_row in range(0, size, sub_size): + for box_col in range(0, size, sub_size): + # Get all non-zero values in the current sub-grid + values = [] + for i in range(sub_size): + for j in range(sub_size): + value = board[box_row + i][box_col + j] + if value != 0: + values.append(value) + # Check for duplicates + if len(values) != len(set(values)): + return False + return True + +class OddEvenSudokuPuzzleFactory(PuzzleFactory): + def __init__(self, size: int) -> None: + super().__init__() + self.game_name = "oddevensudoku" + self.size = size + self.constraints = [ + ConstraintRowNoRepeat(), + ConstraintColNoRepeat(), + ConstraintSubGridNoRepeat(), + ConstraintOddEven() + ] + self.all_possible_values = [i for i in range(1, size + 1)] + + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]: + possible_values = [] + board = game_state["board"] + cell_types = game_state.get("cell_types", None) + original_value = board[row][col] + + # Filter values based on odd/even constraint + if cell_types: + cell_type = cell_types[row][col] + filtered_values = [v for v in self.all_possible_values if + (cell_type == 'w' and v % 2 == 0) or + (cell_type == 'b' 
and v % 2 == 1)] + else: + filtered_values = self.all_possible_values + + for value in filtered_values: + board[row][col] = value + if self.check(game_state): + possible_values.append(value) + board[row][col] = original_value + return possible_values diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/renzoku.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/renzoku.py new file mode 100644 index 0000000000000000000000000000000000000000..ac901209e2a1ff555cffccf11f60b6e015803ea1 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/renzoku.py @@ -0,0 +1,106 @@ +from typing import List, Dict, Any, Tuple +import random +import copy +import os + +from .common_puzzle_factory import PuzzleFactory +from .common_constriants import Constraint + +class ConstraintRowNoRepeat(Constraint): + def __init__(self) -> None: + super().__init__() + self.name = "constraint_row_no_repeat" + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + for row in board: + values = [x for x in row if x != 0] + if len(set(values)) != len(values): + return False + return True + +class ConstraintColNoRepeat(Constraint): + def __init__(self) -> None: + super().__init__() + self.name = "constraint_col_no_repeat" + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + for col in range(size): + values = [board[row][col] for row in range(size) if board[row][col] != 0] + if len(set(values)) != len(values): + return False + return True + +class ConstraintAdjacency(Constraint): + def __init__(self) -> None: + super().__init__() + self.name = "constraint_adjacency" + + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + # Get hints with proper default structure + # default_hints = { + # "row": [['0' for _ in range(size - 1)] for _ in range(size)], + # "col": [['0' for _ in range(size)] for _ in range(size - 1)] + # } + # 
always use hints from the game state + hints = game_state.get("hints") # , default_hints) + # Ensure hints have proper dimensions + if len(hints.get("row", [])) < size: + hints["row"] = [['0' for _ in range(size - 1)] for _ in range(size)] + if len(hints.get("col", [])) < size - 1: + hints["col"] = [['0' for _ in range(size)] for _ in range(size - 1)] + # convert board to int + board_copy = copy.deepcopy(board) + for i in range(size): + for j in range(size): + if board_copy[i][j] != 0: + board_copy[i][j] = int(board_copy[i][j]) + + # Check row adjacency hints + for row in range(size): + for col in range(size - 1): + if hints["row"][row][col] == "1": + if board_copy[row][col] == 0 or board_copy[row][col + 1] == 0: + continue + if abs(board_copy[row][col] - board_copy[row][col + 1]) != 1: + return False + # Check column adjacency hints + for row in range(size - 1): + for col in range(size): + if hints["col"][row][col] == "1": + if board_copy[row][col] == 0 or board_copy[row + 1][col] == 0: + continue + if abs(board_copy[row][col] - board_copy[row + 1][col]) != 1: + return False + return True + + + + +class RenzokuPuzzleFactory(PuzzleFactory): + def __init__(self, size: int) -> None: + super().__init__() + if size < 4 or size > 12: + raise ValueError("Grid size must be between 4 and 12") + self.game_name = "renzoku" + self.size = size + self.constraints = [ + ConstraintRowNoRepeat(), + ConstraintColNoRepeat(), + ConstraintAdjacency() + ] + self.all_possible_values = [i for i in range(1, size + 1)] + self.num_solver_processes = max(os.cpu_count() // 2, 1) # Limit to 4 processes or CPU count + + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]: + possible_values = [] + board = game_state["board"] + original_value = board[row][col] + for value in self.all_possible_values: + board[row][col] = value + if self.check(game_state): + possible_values.append(value) + board[row][col] = original_value + return possible_values diff --git 
a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/skyscraper.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/skyscraper.py new file mode 100644 index 0000000000000000000000000000000000000000..e92ef611921dd94404c9a6cdfae560b06a796dcf --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/skyscraper.py @@ -0,0 +1,95 @@ +from typing import List, Dict, Any, Tuple +import random +import copy +import os +import json + +from .common_puzzle_factory import PuzzleFactory +from .common_constriants import Constraint, ConstraintRowNoRepeat, ConstraintColNoRepeat, ConstraintSubGridNoRepeat + +class ConstraintRowNoRepeat(Constraint): + def __init__(self) -> None: + super().__init__() + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + for row in board: + values = [x for x in row if x != 0] + if len(set(values)) != len(values): + return False + return True + +class ConstraintColNoRepeat(Constraint): + def __init__(self) -> None: + super().__init__() + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + for col in range(size): + values = [board[row][col] for row in range(size) if board[row][col] != 0] + if len(set(values)) != len(values): + return False + return True + +class ConstraintVisibility(Constraint): + def __init__(self) -> None: + super().__init__() + def calculate_visible_buildings(self, line: List[int]) -> int: + visible = 0 + max_height = 0 + for height in line: + if int(height) > max_height: + visible += 1 + max_height = int(height) + return visible + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + clues = game_state["clues"] + size = len(board) + + # Check all directions + for i in range(size): + # Top clues + if clues["top"][i] != 0: + col = [board[row][i] for row in range(size)] + if 0 not in col and self.calculate_visible_buildings(col) != clues["top"][i]: + return False + # Bottom clues + if 
clues["bottom"][i] != 0: + col = [board[row][i] for row in range(size-1, -1, -1)] + if 0 not in col and self.calculate_visible_buildings(col) != clues["bottom"][i]: + return False + # Left clues + if clues["left"][i] != 0: + if 0 not in board[i] and self.calculate_visible_buildings(board[i]) != clues["left"][i]: + return False + # Right clues + if clues["right"][i] != 0: + if 0 not in board[i] and self.calculate_visible_buildings(board[i][::-1]) != clues["right"][i]: + return False + return True + +class SkyscraperPuzzleFactory(PuzzleFactory): + def __init__(self, size: int) -> None: + super().__init__() + if size < 4 or size > 12: + raise ValueError("Grid size must be between 4 and 12") + self.game_name = "skyscraper" + self.size = size + self.constraints = [ + ConstraintRowNoRepeat(), + ConstraintColNoRepeat(), + ConstraintVisibility() + ] + self.all_possible_values = [i for i in range(1, size + 1)] + self.possible_hint_counts = [4, 5, 6, 7, 8, 9, 10, 11, 12] + + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]: + board = game_state["board"] + original_value = board[row][col] + possible_values = [] + for value in self.all_possible_values: + board[row][col] = value + if self.check(game_state): + possible_values.append(value) + board[row][col] = original_value + return possible_values diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/starbattle.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/starbattle.py new file mode 100644 index 0000000000000000000000000000000000000000..0968ebc17d702cbd8e5f3bc2f5f45f46c16254d2 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/starbattle.py @@ -0,0 +1,130 @@ +from typing import List, Dict, Any, Tuple +import random +import copy +import os +import json + +from .common_puzzle_factory import PuzzleFactory +from .common_constriants import Constraint + +DEBUG_CONSTRAINT_ERROR = False + +class ConstraintRowStar(Constraint): + def 
check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + for row_idx, row in enumerate(board): + if 0 not in row: + star_count = sum(1 for cell in row if cell == 's') + if star_count != 1: + if DEBUG_CONSTRAINT_ERROR: + print(f"RowStar constraint failed: Row {row_idx} has {star_count} stars (expected 1)") + return False + else: + star_count = sum(1 for cell in row if cell == 's') + if star_count > 1: + if DEBUG_CONSTRAINT_ERROR: + print(f"RowStar constraint failed: Incomplete row {row_idx} has {star_count} stars (max 1)") + return False + return True + +class ConstraintColStar(Constraint): + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + + for col in range(size): + col_values = [board[row][col] for row in range(size)] + + if 0 not in col_values: + star_count = sum(1 for val in col_values if val == 's') + if star_count != 1: + if DEBUG_CONSTRAINT_ERROR: + print(f"ColStar constraint failed: Column {col} has {star_count} stars (expected 1)") + return False + else: + star_count = sum(1 for val in col_values if val == 's') + if star_count > 1: + if DEBUG_CONSTRAINT_ERROR: + print(f"ColStar constraint failed: Incomplete column {col} has {star_count} stars (max 1)") + return False + return True + +class ConstraintRegionStar(Constraint): + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + regions = game_state["regions"] + size = len(board) + region_counts = {} + for i in range(size): + for j in range(size): + if board[i][j] == 's': + region = regions[i][j] + region_counts[region] = region_counts.get(region, 0) + 1 + if region_counts[region] > 1: + if DEBUG_CONSTRAINT_ERROR: + print(f"RegionStar constraint failed: Region {region} has {region_counts[region]} stars (max 1)") + return False + + for region_num in set(cell for row in regions for cell in row): + region_cells = [(i, j) for i in range(size) for j in range(size) + if regions[i][j] == region_num] + if 
all(board[i][j] != 0 for i, j in region_cells): + if region_counts.get(region_num, 0) != 1: + if DEBUG_CONSTRAINT_ERROR: + print(f"RegionStar constraint failed: Completed region {region_num} has {region_counts.get(region_num, 0)} stars (expected 1)") + return False + return True + +class ConstraintAdjacentStar(Constraint): + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + for row in range(size): + for col in range(size): + if board[row][col] == 's': + for dr in [-1, 0, 1]: + for dc in [-1, 0, 1]: + if dr == 0 and dc == 0: + continue + new_row, new_col = row + dr, col + dc + if (0 <= new_row < size and + 0 <= new_col < size and + board[new_row][new_col] == 's'): + if DEBUG_CONSTRAINT_ERROR: + print(f"AdjacentStar constraint failed: Stars at ({row},{col}) and ({new_row},{new_col}) are adjacent") + return False + return True + +class StarBattlePuzzleFactory(PuzzleFactory): + def __init__(self, size: int, num_stars: int = 1) -> None: + super().__init__() + self.game_name = "starbattle" + self.size = size + self.num_stars = num_stars + self.colors = [chr(65 + i) for i in range(size)] + # During generation, only use row, column, and adjacent constraints + self.constraints = [ + ConstraintRowStar(), + ConstraintColStar(), + ConstraintAdjacentStar(), + ConstraintRegionStar() + ] + + self.all_possible_values = ['s', 'e'] + + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[str]: + """Get possible values ('e' for empty or 's' for star) for a given cell.""" + board = game_state["board"] + + # If the cell is already filled with 'e' or 's', return empty list + if board[row][col] in ['s', 'e']: + return [] + + # Try both values and return those that don't immediately violate constraints + possible = [] + for val in ['s', 'e']: + board[row][col] = val + if self.check(game_state): + possible.append(val) + board[row][col] = 0 # Reset to initial state + return possible diff --git 
a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/sudoku.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/sudoku.py new file mode 100644 index 0000000000000000000000000000000000000000..d4a11ec89aab2573e5a7f3a2580ed0938e91b738 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/sudoku.py @@ -0,0 +1,32 @@ +from typing import List, Tuple, Union, Dict, Any +import random +import copy +from abc import ABC, abstractmethod +import os +import json +import argparse + +from .common_puzzle_factory import PuzzleFactory +from .common_constriants import Constraint, ConstraintRowNoRepeat, ConstraintColNoRepeat, ConstraintSubGridNoRepeat + +class SudokuPuzzleFactory(PuzzleFactory): + def __init__(self, size: int) -> None: + super().__init__() + self.game_name = "sudoku" + self.size = size + + self.constraints.append(ConstraintRowNoRepeat()) + self.constraints.append(ConstraintColNoRepeat()) + self.constraints.append(ConstraintSubGridNoRepeat()) + + self.all_possible_values = [i for i in range(1, size + 1)] + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[int]: + possible_values = [] + board = game_state["board"] + original_value = board[row][col] + for value in self.all_possible_values: + board[row][col] = value + if self.check(game_state): + possible_values.append(value) + board[row][col] = original_value + return possible_values diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/thermometers.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/thermometers.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e1ca7af2595ca4661b7e0b561afdc3b12a031c --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/thermometers.py @@ -0,0 +1,108 @@ +from typing import List, Dict, Any, Tuple +import random +import copy +import os +import json +import argparse + +from .common_puzzle_factory import PuzzleFactory +from 
.common_constriants import Constraint + +class ConstraintThermometerFill(Constraint): + """Check if thermometers are filled correctly (from bulb to top, no gaps)""" + def __init__(self) -> None: + super().__init__() + self.name = "constraint_thermometer_fill" + + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + thermometers = game_state.get("clues", {}).get("thermometers", []) # Fixed: get thermometers from clues + + + # Create a set of all thermometer positions for efficient lookup + thermometer_positions = {(r, c) for therm in thermometers for r, c in therm} + + # Check non-thermometer cells are empty or undefined + for i in range(len(board)): + for j in range(len(board[i])): + if (i, j) not in thermometer_positions and board[i][j] == "s": + return False + # Check thermometer filling rules + for thermometer in thermometers: + # Find first empty cell in thermometer + first_empty = -1 + for i, (r, c) in enumerate(thermometer): + if board[r][c] == "e": # if empty + first_empty = i + break + # After first empty, all cells must be empty + if first_empty != -1: + for i, (r, c) in enumerate(thermometer): + if i > first_empty and board[r][c] == "s": # if selected + return False + return True + +class ConstraintThermometerCount(Constraint): + """Check if row and column counts match the clues""" + def __init__(self) -> None: + super().__init__() + self.name = "constraint_thermometer_count" + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + clues = game_state.get("clues", None) + if not clues: + return True + + size = len(board) + row_counts = clues["row_counts"] + col_counts = clues["col_counts"] + + # Check rows + for i in range(size): + row_selected = sum(1 for j in range(size) if board[i][j] == "s") + row_undefined = sum(1 for j in range(size) if board[i][j] == 0) + if 0 not in board[i]: # if row is complete + if row_selected != row_counts[i]: + return False + else: # if row is incomplete + if 
row_selected > row_counts[i]: # too many selected + return False + if row_selected + row_undefined < row_counts[i]: # impossible to reach target + return False + # Check columns + for j in range(size): + col_selected = sum(1 for i in range(size) if board[i][j] == "s") + col_undefined = sum(1 for i in range(size) if board[i][j] == 0) + if all(board[i][j] != 0 for i in range(size)): # if column is complete + if col_selected != col_counts[j]: + return False + else: # if column is incomplete + if col_selected > col_counts[j]: # too many selected + return False + if col_selected + col_undefined < col_counts[j]: # impossible to reach target + return False + return True + +class ThermometersPuzzleFactory(PuzzleFactory): + def __init__(self, size: int) -> None: + super().__init__() + if size < 4: + raise ValueError("Size must be at least 4") + self.game_name = "thermometers" + self.size = size + self.constraints = [ + ConstraintThermometerFill(), + ConstraintThermometerCount() + ] + + self.all_possible_values = ["e", "s"] # empty or selected + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[str]: + possible_values = [] + board = game_state["board"] + original_value = board[row][col] + for value in self.all_possible_values: + board[row][col] = value + if self.check(game_state): + possible_values.append(value) + board[row][col] = original_value + return possible_values diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/treesandtents.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/treesandtents.py new file mode 100644 index 0000000000000000000000000000000000000000..421398aafcad698dac966ba7ff1dbdbf35f3278f --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/vgrpbench/puzzles/treesandtents.py @@ -0,0 +1,179 @@ +from typing import List, Dict, Any, Tuple +import random +import copy +import os +import json +import argparse + +from .common_puzzle_factory import PuzzleFactory +from .common_constriants 
import Constraint + +class ConstraintRowTents(Constraint): + def check(self, game_state: Dict[str, Any]) -> bool: + + board = game_state["board"] + # if board[0][0] == 'e' and board[0][1] == 'e': + # import ipdb; ipdb.set_trace() + clues = game_state.get("clues", None) + if not clues: + return True + for i, row in enumerate(board): + if 0 not in row: # If row is complete + tent_count = row.count("tt") + if tent_count != clues["row_clues"][i]: + return False + else: # If row is incomplete + tent_count = row.count("tt") + if tent_count > clues["row_clues"][i]: + return False + return True + +class ConstraintColTents(Constraint): + def check(self, game_state: Dict[str, Any]) -> bool: + + board = game_state["board"] + clues = game_state.get("clues", None) + if not clues: + return True + size = len(board) + for j in range(size): + col = [board[i][j] for i in range(size)] + if 0 not in col: # If column is complete + tent_count = col.count("tt") + if tent_count != clues["col_clues"][j]: + return False + else: # If column is incomplete + tent_count = col.count("tt") + if tent_count > clues["col_clues"][j]: + return False + return True + +class ConstraintTentTree(Constraint): + """ + Check if: + 1. Each tent has exactly one adjacent tree (horizontally or vertically) + 2. Each tree has exactly one adjacent tent (horizontally or vertically) when complete + 3. 
Each tree should have exactly one tent or potential tent spot (empty cell) adjacent + """ + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + + # Keep track of which trees are paired with which tents + tree_tent_pairs = {} # tree position -> tent position + + # First, check each tent has exactly one adjacent tree + for i in range(size): + for j in range(size): + if board[i][j] == "tt": + adjacent_trees = [] + for di, dj in [(-1,0), (1,0), (0,-1), (0,1)]: # Only orthogonal + ni, nj = i + di, j + dj + if 0 <= ni < size and 0 <= nj < size: + if board[ni][nj] == "tr": + adjacent_trees.append((ni, nj)) + # Each tent must have exactly one adjacent tree + if len(adjacent_trees) != 1: + return False + + tree_pos = adjacent_trees[0] + + tree_tent_pairs[tree_pos] = (i, j) + + # Then, check each tree + for i in range(size): + for j in range(size): + if board[i][j] == "tr": + # Count adjacent tents and empty cells + adjacent_tents = 0 + adjacent_non_allocated = 0 + for di, dj in [(-1,0), (1,0), (0,-1), (0,1)]: + ni, nj = i + di, j + dj + if 0 <= ni < size and 0 <= nj < size: + if board[ni][nj] == "tt": + adjacent_tents += 1 + elif board[ni][nj] == 0: + adjacent_non_allocated += 1 + + if adjacent_tents > 1: + return False + if adjacent_tents == 1: + pass + if adjacent_tents == 0: + if adjacent_non_allocated == 0: + return False + + return True + +class ConstraintAdjacentTents(Constraint): + """ + Check if tents are not adjacent (including diagonally). 
+ """ + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + # Check tents are not adjacent (including diagonally) + for i in range(size): + for j in range(size): + if board[i][j] == "tt": + for di in [-1, 0, 1]: + for dj in [-1, 0, 1]: + if di == 0 and dj == 0: + continue + ni, nj = i + di, j + dj + if 0 <= ni < size and 0 <= nj < size: + if board[ni][nj] == "tt": + return False + return True + +class ConstraintTentTreeCount(Constraint): + """ + Check if: + 1. Number of tents + unallocated cells >= number of trees (during solving) + 2. Number of tents == number of trees (for completed board) + """ + def check(self, game_state: Dict[str, Any]) -> bool: + board = game_state["board"] + size = len(board) + + num_trees = sum(row.count("tr") for row in board) + num_tents = sum(row.count("tt") for row in board) + num_unallocated = sum(row.count(0) for row in board) + + # If board is complete (no unallocated cells) + if num_unallocated == 0: + return num_tents == num_trees + + # During solving, ensure we can still potentially place enough tents + return (num_tents + num_unallocated) >= num_trees + + +class TreesAndTentsPuzzleFactory(PuzzleFactory): + def __init__(self, size: int) -> None: + super().__init__() + self.game_name = "treesandtents" + self.size = size + assert size >= 3, "Size must be at least 3" + self.constraints = [ + ConstraintRowTents(), + ConstraintColTents(), + ConstraintTentTree(), + ConstraintAdjacentTents(), + ConstraintTentTreeCount() + ] + self.all_possible_values = ["tt", 'e'] + self.num_generator_processes = max(os.cpu_count() // 2, 1) # Limit to 4 processes or CPU count + + def get_possible_values(self, game_state: Dict[str, Any], row: int, col: int) -> List[str]: + """Get possible values for a given cell.""" + board = game_state["board"] + if board[row][col] != 0: # If cell is already filled + return [] + possible = [] + original_value = board[row][col] + for value in self.all_possible_values: 
+ board[row][col] = value + if self.check(game_state): + possible.append(value) + board[row][col] = original_value + return possible diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/videoholmes.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/videoholmes.py new file mode 100644 index 0000000000000000000000000000000000000000..6a8892e0003b1cadfb0e922240e25ccd48787a07 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/videoholmes.py @@ -0,0 +1,80 @@ +from ...smp import * +from .multiple_choice import extract_answer_from_item +import numpy as np +import re + +FAIL_MSG = 'Failed to obtain answer via API.' + +TASK_CATEGORIES = [ + 'SR','IMC','TCI','TA','MHR','PAR','CTI', +] + + +def get_dimension_rating(data_path, score_col='score', type_col='question_type'): + data = load(data_path) + acc_by_type = {} + for qtype, group in data.groupby(type_col): + correct = (group[score_col] == 1).sum() + total = len(group) + acc = correct / total if total > 0 else 0 + acc_by_type[qtype] = { + 'correct': int(correct), + 'total': int(total), + 'acc': acc + } + + total_correct = (data[score_col] == 1).sum() + total_count = len(data) + total_acc = total_correct / total_count if total_count > 0 else 0 + + result = { + 'acc_by_type': acc_by_type, + 'total': { + 'correct': int(total_correct), + 'total': int(total_count), + 'acc': total_acc + } + } + + return result + + +def extract_option(pred): + + pattern = r'\s*(.*?)\s*' + try: + matches = re.findall(pattern, pred, re.DOTALL) + except: + matches = [] + + if matches: + choise = matches[-1].strip() + if 'A ' in choise or 'A:' in choise or '[A' in choise: + predicted_answer = 'A' + elif 'B ' in choise or 'B:' in choise or '[B' in choise: + predicted_answer = 'B' + elif 'C ' in choise or 'C:' in choise or '[C' in choise: + predicted_answer = 'C' + elif 'D ' in choise or 'D:' in choise or '[D' in choise: + predicted_answer = 'D' + elif 'E ' in choise or 'E:' in choise or '[E' in choise: + predicted_answer = 'E' + elif 'F ' in choise 
or 'F:' in choise or '[F' in choise: + predicted_answer = 'F' + elif 'A' in choise: + predicted_answer = 'A' + elif 'B' in choise: + predicted_answer = 'B' + elif 'C' in choise: + predicted_answer = 'C' + elif 'D' in choise: + predicted_answer = 'D' + elif 'E' in choise: + predicted_answer = 'E' + elif 'F' in choise: + predicted_answer = 'F' + else: + predicted_answer = 'WRONG' + else: + predicted_answer = 'WRONG' + return predicted_answer diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/wemath.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/wemath.py new file mode 100644 index 0000000000000000000000000000000000000000..5e90d96e991895150d8e347c972343a7cc10c554 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/wemath.py @@ -0,0 +1,898 @@ +# pylint: skip-file + +import pandas as pd +import json +import numpy as np +import os +import argparse + +from ...smp import load + +# four_dimensional_metrics.py + + +# Function to evaluate steps +def evaluate_evaluate_steps(json, steps): # noqa + jokers = [json[[f'joker_{i}', f'knowledge concept_{i}']] for i in range(1, steps + 1)] + for i in range(steps): + jokers[i].rename( + columns={f'joker_{i + 1}': 'joker', f'knowledge concept_{i + 1}': 'knowledge_concept'}, + inplace=True, + ) + concatenated_steps = pd.concat(jokers, axis=0) + return concatenated_steps + + +# Function to load and process JSON data +def load_and_process_data(filepath): + df = load(filepath) + if 'hit' not in df.columns: + df['processed_answer'] = ( + df['prediction'] + .str.split('Answer') + .str[-1] + .str.strip() + .str.replace(r'[>><<:.]', '', regex=True) + .str.strip() + ) + df['processed_answer'] = df['processed_answer'].apply(lambda x: x[0] if x and x[0] in 'ABCDEFGH' else None) + df['joker'] = df['processed_answer'] == df['answer'] + else: + df['joker'] = df['hit'].astype(bool) + return df + + +# Function to process steps data and merge results +def evaluate_process_steps_data(df, steps): + steps_data = {f'{steps}steps_{i}': df[df['key'] 
# Function to process steps data and merge results
def evaluate_process_steps_data(df, steps):
    """Pivot a WeMath split into one wide row per problem.

    Selects the ``{steps}steps_1`` ... ``{steps}steps_{steps}`` single-step
    rows plus the ``{steps}steps_multi`` row, suffixes every column with its
    step label, and left-joins them all on the problem ID.
    """
    labels = [str(i) for i in range(1, steps + 1)] + ['multi']
    parts = {}
    for label in labels:
        subset = df[df['key'] == f'{steps}steps_{label}'].copy()
        # Tag every column with its step label so the joins stay unambiguous.
        subset.columns = [f'{col}_{label}' for col in subset.columns]
        parts[label] = subset

    merged = parts['1']
    for label in labels[1:]:
        merged = pd.merge(merged, parts[label], left_on='ID_1', right_on=f'ID_{label}', how='left')
    return merged
# Function to calculate evaluation metrics
def evaluate_calculate_metrics(merged_2steps, merged_3steps):
    """Bucket problems into WeMath mastery categories.

    For each of the 2-step and 3-step splits, partitions rows by whether the
    single-step answers and the multi-step answer were right, producing the
    ``steps{2,3}_filtered_rows_{1_loose,1_strict,2,3,4_loose,4_strict}``
    filtered frames the downstream scoring consumes.
    """
    metrics = {}

    def _masks(frame, n_steps):
        # ``.eq`` keeps the original ``== True`` / ``== False`` semantics,
        # including the treatment of NaNs from failed left-joins.
        step_cols = [frame[f'joker_{i}'] for i in range(1, n_steps + 1)]
        wrong = [col.eq(False) for col in step_cols]
        right = [col.eq(True) for col in step_cols]
        all_wrong, any_wrong = wrong[0], wrong[0]
        all_right, any_right = right[0], right[0]
        for w, r in zip(wrong[1:], right[1:]):
            all_wrong = all_wrong & w
            any_wrong = any_wrong | w
            all_right = all_right & r
            any_right = any_right | r
        hit = frame['joker_multi'].eq(True)
        miss = frame['joker_multi'].eq(False)
        return all_wrong, any_wrong, all_right, any_right, hit, miss

    for n_steps, frame in ((2, merged_2steps), (3, merged_3steps)):
        all_wrong, any_wrong, all_right, any_right, hit, miss = _masks(frame, n_steps)
        prefix = f'steps{n_steps}_filtered_rows'
        metrics[f'{prefix}_1_loose'] = frame[all_wrong & hit]    # rote memorization (loose)
        metrics[f'{prefix}_1_strict'] = frame[any_wrong & hit]   # rote memorization (strict)
        metrics[f'{prefix}_2'] = frame[all_right & miss]         # inadequate generalization
        metrics[f'{prefix}_3'] = frame[any_wrong & miss]         # insufficient knowledge
        metrics[f'{prefix}_4_loose'] = frame[any_right & hit]    # complete mastery (loose)
        metrics[f'{prefix}_4_strict'] = frame[all_right & hit]   # complete mastery (strict)
    return metrics
# Function to compute evaluation rates and final scores
def evaluate_compute_final_scores(metrics, total_count):
    """Aggregate the per-bucket frames into raw counts and percentage strings.

    Args:
        metrics: dict of sized containers keyed ``steps{2,3}_filtered_rows_*``
            as produced by ``evaluate_calculate_metrics``.
        total_count: number of problems the rates are normalised by
            (525 for the full WeMath set).

    Returns:
        ``(total_counts, rates)`` -- raw counts per category plus formatted
        ``"{:.2%}"`` rate strings.
    """
    def _count(suffix):
        # Each category pools the 2-step and 3-step buckets.
        return len(metrics[f'steps2_filtered_rows_{suffix}']) + len(metrics[f'steps3_filtered_rows_{suffix}'])

    def _pct(value):
        return "{:.2%}".format(value)

    total_counts = {
        'InadequateGeneralization': _count('2'),
        'InsufficientKnowledge': _count('3'),
        'CompleteMastery_loose': _count('4_loose'),
        'CompleteMastery_strict': _count('4_strict'),
        'RoteMemorization_loose': _count('1_loose'),
        'RoteMemorization_strict': _count('1_strict'),
    }
    rates = {
        'InadequateGeneralization_rate': _pct(total_counts['InadequateGeneralization'] / total_count),
        'InsufficientKnowledge_rate': _pct(total_counts['InsufficientKnowledge'] / total_count),
        'CompleteMastery_loose_rate': _pct(total_counts['CompleteMastery_loose'] / total_count),
        'CompleteMastery_strict_rate': _pct(total_counts['CompleteMastery_strict'] / total_count),
        # Rote memorization is reported relative to all multi-step hits
        # (complete mastery + rote memorization), not the full set.
        'RoteMemorization_loose_rate': _pct(
            total_counts['RoteMemorization_loose']
            / (total_counts['CompleteMastery_loose'] + total_counts['RoteMemorization_loose'])
        ),
        'RoteMemorization_strict_rate': _pct(
            total_counts['RoteMemorization_strict']
            / (total_counts['CompleteMastery_strict'] + total_counts['RoteMemorization_strict'])
        ),
    }
    return total_counts, rates
# Function to update main results DataFrame
def evaluate_update_main_results_df(main_results_df, total_counts, rates, dataset_size=525):
    """Append one formatted result row (strict + loose four-dimensional
    metrics) to ``main_results_df``.

    Args:
        main_results_df: accumulator frame; not mutated -- a new frame is
            returned.
        total_counts: raw category counts from ``evaluate_compute_final_scores``.
        rates: formatted percentage strings from the same call.
        dataset_size: number of problems the score is normalised by; defaults
            to 525, the size the original hard-coded.

    Returns:
        A new DataFrame with the extra row appended.
    """
    def _score(rote_key):
        # Score = (N - 0.5*InadequateGeneralization - RoteMemorization
        #          - InsufficientKnowledge) / N, as a percentage string.
        return "{:.2%}".format(
            (
                dataset_size
                - 0.5 * total_counts['InadequateGeneralization']
                - total_counts[rote_key]
                - total_counts['InsufficientKnowledge']
            )
            / dataset_size
        )

    new_row = {
        # 'Model': model,
        'Score (Strict)': _score('RoteMemorization_strict'),
        'InsufficientKnowledge (Strict)': f"{rates['InsufficientKnowledge_rate']} ({total_counts['InsufficientKnowledge']})",
        'InadequateGeneralization (Strict)': f"{rates['InadequateGeneralization_rate']} ({total_counts['InadequateGeneralization']})",
        'CompleteMastery (Strict)': f"{rates['CompleteMastery_strict_rate']} ({total_counts['CompleteMastery_strict']})",
        'RoteMemorization (Strict)': f"{rates['RoteMemorization_strict_rate']} ({total_counts['RoteMemorization_strict']})",
        'Score (Loose)': _score('RoteMemorization_loose'),
        'InsufficientKnowledge (Loose)': f"{rates['InsufficientKnowledge_rate']} ({total_counts['InsufficientKnowledge']})",
        'InadequateGeneralization (Loose)': f"{rates['InadequateGeneralization_rate']} ({total_counts['InadequateGeneralization']})",
        'CompleteMastery (Loose)': f"{rates['CompleteMastery_loose_rate']} ({total_counts['CompleteMastery_loose']})",
        'RoteMemorization (Loose)': f"{rates['RoteMemorization_loose_rate']} ({total_counts['RoteMemorization_loose']})",
    }
    # ``DataFrame._append`` is private pandas API; ``pd.concat`` is the
    # supported replacement and yields the same column union / NaN fill.
    return pd.concat([main_results_df, pd.DataFrame([new_row])], ignore_index=True)
# Main function to evaluate models
def wemath_evaluate_models(output_json, main_results_csv_path=None):
    """Run the WeMath four-dimensional-metric evaluation over one result file.

    Loads the predictions, pivots the 2-step and 3-step splits, buckets them
    into mastery categories, prints the summary table, optionally writes it
    to CSV, and returns it as a dict.
    """
    columns = [
        'Model',
        'Score (Strict)',
        'InsufficientKnowledge (Strict)',
        'InadequateGeneralization (Strict)',
        'CompleteMastery (Strict)',
        'RoteMemorization (Strict)',
        'Score (Loose)',
        'InsufficientKnowledge (Loose)',
        'InadequateGeneralization (Loose)',
        'CompleteMastery (Loose)',
        'RoteMemorization (Loose)',
    ]
    main_results_df = pd.DataFrame(columns=columns)

    data = load_and_process_data(output_json)
    merged_2steps = evaluate_process_steps_data(data[data['key'].str.contains('2steps')], 2)
    merged_3steps = evaluate_process_steps_data(data[data['key'].str.contains('3steps')], 3)

    metrics = evaluate_calculate_metrics(merged_2steps, merged_3steps)
    total_counts, rates = evaluate_compute_final_scores(metrics, total_count=525)
    main_results_df = evaluate_update_main_results_df(main_results_df, total_counts, rates)

    print(main_results_df.to_string(index=False))
    if main_results_csv_path is not None:
        main_results_df.to_csv(main_results_csv_path, index=False)
        print("Evaluation completed and results saved to CSV.")
    return main_results_df.to_dict()


### Accuracy.py
# Function to load knowledge structure nodes
def load_knowledge_structure_nodes(filepath):
    """Build the knowledge-structure lookup table.

    NOTE(review): ``filepath`` is ignored -- the table comes from the
    module-level ``knowledge_structure_nodes`` constant, exactly as in the
    original (the file-based branch was commented out upstream).
    """
    table = pd.DataFrame(knowledge_structure_nodes)
    table['final_key'] = table['full node'].str.split('_').str[-1]
    table['root_2'] = table['full node'].str.split('_').str[1]
    return table


# Function to evaluate steps
def accuracy_evaluate_steps(json, steps, nodes):
    """Long-form per-step results joined with their knowledge-structure node."""
    frames = []
    for idx in range(1, steps + 1):
        part = json[[f'joker_{idx}', f'knowledge concept_{idx}']]
        part = pd.merge(
            part,
            nodes[['final_key', 'full node', 'root_2']],
            left_on=f'knowledge concept_{idx}',
            right_on='final_key',
            how='left',
        )
        part = part.rename(
            columns={f'joker_{idx}': 'joker', f'knowledge concept_{idx}': 'knowledge_concept'}
        )
        frames.append(part)
    return pd.concat(frames, axis=0)


# Function to process steps data and merge results
def accuracy_process_steps_data(df, steps):
    """Pivot a split into one wide row per problem.

    Same shape as ``evaluate_process_steps_data``; the duplicate is kept to
    preserve the original module's layout.
    """
    labels = [str(i) for i in range(1, steps + 1)] + ['multi']
    parts = {}
    for label in labels:
        subset = df[df['key'] == f'{steps}steps_{label}'].copy()
        subset.columns = [f'{col}_{label}' for col in subset.columns]
        parts[label] = subset
    merged = parts['1']
    for label in labels[1:]:
        merged = pd.merge(merged, parts[label], left_on='ID_1', right_on=f'ID_{label}', how='left')
    return merged
# Function to update main results DataFrame
def accuracy_update_main_results_df(nodes, main_results_df, concatenated_data, merged_2steps, merged_3steps):
    """Append one accuracy row (step-wise plus per-category accuracies).

    Args:
        nodes: knowledge-structure table with ``full node`` and ``root2``
            columns; a helper ``final_rode`` column is added to it in place.
        main_results_df: accumulator frame; a new frame is returned.
        concatenated_data: long-form per-step results with ``final_key``
            and ``joker`` columns.
        merged_2steps / merged_3steps: wide per-problem frames carrying
            ``joker_multi``.

    Returns:
        ``main_results_df`` with one extra formatted row.
    """
    new_row = {
        # 'Model': model_name,
        'One-step(S1)': "{:.2%}".format(concatenated_data['joker'].mean()),
        'Two-step(S2)': "{:.2%}".format(merged_2steps['joker_multi'].mean()),
        'Three-step(S3)': "{:.2%}".format(merged_3steps['joker_multi'].mean()),
    }

    # Per-category accuracy: average the per-leaf scores up to the root2 level.
    nodes['final_rode'] = nodes['full node'].str.split('_').str[-1]
    leaf_scores = concatenated_data.groupby('final_key')['joker'].mean()
    # ``right_on='final_key'`` matches the Series' index level name, which
    # pandas.merge accepts alongside column names.
    per_node = pd.merge(nodes, leaf_scores, left_on='final_rode', right_on='final_key', how='left')
    new_row.update(
        per_node.groupby('root2')['joker'].mean().apply(lambda x: "{:.2%}".format(x)).to_dict()
    )

    # ``pd.concat`` replaces the private ``DataFrame._append`` API used before.
    return pd.concat([main_results_df, pd.DataFrame([new_row])], ignore_index=True)
# Main function to evaluate models
def wemath_accuracy(output_json, main_results_csv_path=None):
    """Compute WeMath step-wise and per-knowledge-category accuracies.

    Builds the knowledge-structure table from the module-level constant,
    pivots both splits, aggregates accuracies, prints the summary, optionally
    writes CSV, and returns the table as a dict.
    """
    # nodes = load_knowledge_structure_nodes(knowledge_structure_nodes_path)
    nodes = pd.DataFrame(knowledge_structure_nodes)
    nodes['final_key'] = nodes['full node'].str.split('_').str[-1]
    nodes['root_2'] = nodes['full node'].str.split('_').str[1]

    columns = [
        'Model',
        'One-step(S1)',
        'Two-step(S2)',
        'Three-step(S3)',
        'Understanding and Conversion of Units',
        'Angles and Length',
        'Calculation of Plane Figures',
        'Understanding of Plane Figures',
        'Calculation of Solid Figures',
        'Understanding of Solid Figures',
        'Basic Transformations of Figures',
        'Cutting and Combining of Figures',
        'Direction',
        'Position',
        'Route Map',
        'Correspondence of Coordinates and Positions',
    ]
    main_results_df = pd.DataFrame(columns=columns)

    data = load_and_process_data(output_json)
    merged_2steps = accuracy_process_steps_data(data[data['key'].str.contains('2steps')], 2)
    merged_3steps = accuracy_process_steps_data(data[data['key'].str.contains('3steps')], 3)

    concatenated_data = pd.concat(
        [accuracy_evaluate_steps(merged_2steps, 2, nodes), accuracy_evaluate_steps(merged_3steps, 3, nodes)],
        axis=0,
    )
    main_results_df = accuracy_update_main_results_df(
        nodes, main_results_df, concatenated_data, merged_2steps, merged_3steps
    )

    print(main_results_df.to_string(index=False))
    if main_results_csv_path is not None:
        main_results_df.to_csv(main_results_csv_path, index=False)
        print("Evaluation completed and results saved to CSV.")

    return main_results_df.to_dict()
+ "root0": "Geometry and Figures", + "root1": "Measurement", + "root2": "Angles and Length", + "root3": "Understanding Length (Using a Ruler)", + "root4": None, + "full node": "Measurement_Angles and Length_Understanding Length (Using a Ruler)", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Surface Area of Solid Figures", + "root4": "Surface Area of Cylinders", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Surface Area of Solid Figures_Surface Area of Cylinders", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Surface Area of Solid Figures", + "root4": "Surface Area of Rectangular Cuboids", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Surface Area of Solid Figures_Surface Area of Rectangular Cuboids", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Surface Area of Solid Figures", + "root4": "Surface Area of Cubes", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Surface Area of Solid Figures_Surface Area of Cubes", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Volume of Solid Figures", + "root4": "Volume and Capacity of Cylinders", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Volume of Solid Figures_Volume and Capacity of Cylinders", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Volume of Solid Figures", + "root4": "Volume and Capacity of Cones", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Volume of Solid Figures_Volume and Capacity of Cones", + }, + { + 
"root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Volume of Solid Figures", + "root4": "Volume and Capacity of Rectangular Cuboids", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Volume of Solid Figures_Volume and Capacity of Rectangular Cuboids", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Calculation of Solid Figures", + "root3": "Calculation of Volume of Solid Figures", + "root4": "Volume and Capacity of Cubes", + "full node": "Solid Figures_Calculation of Solid Figures_Calculation of Volume of Solid Figures_Volume and Capacity of Cubes", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Expanded View of Solids", + "root4": "Expanded View of Cylinders", + "full node": "Solid Figures_Understanding of Solid Figures_Expanded View of Solids_Expanded View of Cylinders", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Expanded View of Solids", + "root4": "Expanded View of Rectangular Cuboids", + "full node": "Solid Figures_Understanding of Solid Figures_Expanded View of Solids_Expanded View of Rectangular Cuboids", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Expanded View of Solids", + "root4": "Expanded View of Cubes", + "full node": "Solid Figures_Understanding of Solid Figures_Expanded View of Solids_Expanded View of Cubes", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Cylinders and Cones", + "root4": "Properties of Cylinders", + "full node": "Solid Figures_Understanding of Solid Figures_Cylinders and Cones_Properties of Cylinders", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid 
Figures", + "root2": "Understanding of Solid Figures", + "root3": "Cylinders and Cones", + "root4": "Properties of Cones", + "full node": "Solid Figures_Understanding of Solid Figures_Cylinders and Cones_Properties of Cones", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Rectangular Cuboids and Cubes", + "root4": "Properties and Understanding of Rectangular Cuboids", + "full node": "Solid Figures_Understanding of Solid Figures_Rectangular Cuboids and Cubes_Properties and Understanding of Rectangular Cuboids", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Rectangular Cuboids and Cubes", + "root4": "Properties and Understanding of Cubes", + "full node": "Solid Figures_Understanding of Solid Figures_Rectangular Cuboids and Cubes_Properties and Understanding of Cubes", + }, + { + "root0": "Geometry and Figures", + "root1": "Solid Figures", + "root2": "Understanding of Solid Figures", + "root3": "Observing Objects", + "root4": None, + "full node": "Solid Figures_Understanding of Solid Figures_Observing Objects", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Sum of Interior Angles of Polygons", + "root4": "Sum of Interior Angles of Other Polygons", + "full node": "Plane Figures_Calculation of Plane Figures_Sum of Interior Angles of Polygons_Sum of Interior Angles of Other Polygons", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Sum of Interior Angles of Polygons", + "root4": "Sum of Interior Angles of Triangles", + "full node": "Plane Figures_Calculation of Plane Figures_Sum of Interior Angles of Polygons_Sum of Interior Angles of Triangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane 
Figures", + "root3": "Calculation and Comparison of Angles", + "root4": None, + "full node": "Plane Figures_Calculation of Plane Figures_Calculation and Comparison of Angles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Areas", + "root4": "Area of Parallelograms", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Parallelograms", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Areas", + "root4": "Area of Triangles", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Triangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Areas", + "root4": "Area of Sectors", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Sectors", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Areas", + "root4": "Area of Trapezoids", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Trapezoids", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Areas", + "root4": "Area of Circles", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Circles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Areas", + "root4": "Area of Rectangles", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Rectangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": 
"Calculation of Areas", + "root4": "Area of Squares", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Areas_Area of Squares", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Perimeters", + "root4": "Perimeter of Parallelograms", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Perimeters_Perimeter of Parallelograms", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Perimeters", + "root4": "Perimeter of Triangles", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Perimeters_Perimeter of Triangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Perimeters", + "root4": "Perimeter of Trapezoids", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Perimeters_Perimeter of Trapezoids", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Perimeters", + "root4": "Circumference of Circles", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Perimeters_Circumference of Circles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Perimeters", + "root4": "Perimeter of Rectangles", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Perimeters_Perimeter of Rectangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Calculation of Plane Figures", + "root3": "Calculation of Perimeters", + "root4": "Perimeter of Squares", + "full node": "Plane Figures_Calculation of Plane Figures_Calculation of Perimeters_Perimeter of Squares", + }, + { + "root0": 
"Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Polygons", + "root4": "Properties and Understanding of Parallelograms", + "full node": "Plane Figures_Understanding of Plane Figures_Polygons_Properties and Understanding of Parallelograms", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Polygons", + "root4": "Properties and Understanding of Triangles", + "full node": "Plane Figures_Understanding of Plane Figures_Polygons_Properties and Understanding of Triangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Polygons", + "root4": "Properties and Understanding of Trapezoids", + "full node": "Plane Figures_Understanding of Plane Figures_Polygons_Properties and Understanding of Trapezoids", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Polygons", + "root4": "Properties and Understanding of Rectangles", + "full node": "Plane Figures_Understanding of Plane Figures_Polygons_Properties and Understanding of Rectangles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Polygons", + "root4": "Properties and Understanding of Squares", + "full node": "Plane Figures_Understanding of Plane Figures_Polygons_Properties and Understanding of Squares", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Classification and Understanding of Angles", + "root4": "Understanding Triangular Rulers", + "full node": "Plane Figures_Understanding of Plane Figures_Classification and Understanding of Angles_Understanding Triangular Rulers", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of 
Plane Figures", + "root3": "Classification and Understanding of Angles", + "root4": "Understanding and Representing Angles", + "full node": "Plane Figures_Understanding of Plane Figures_Classification and Understanding of Angles_Understanding and Representing Angles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Properties and Understanding of Line Segments", + "root4": "Distance Between Two Points", + "full node": "Plane Figures_Understanding of Plane Figures_Properties and Understanding of Line Segments_Distance Between Two Points", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Properties and Understanding of Line Segments", + "root4": "Understanding Line Segments, Lines, and Rays", + "full node": "Plane Figures_Understanding of Plane Figures_Properties and Understanding of Line Segments_Understanding Line Segments, Lines, and Rays", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Positional Relationships Between Line Segments", + "root4": "perpendicularity", + "full node": "Plane Figures_Understanding of Plane Figures_Positional Relationships Between Line Segments_perpendicularity", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Positional Relationships Between Line Segments", + "root4": "Parallel", + "full node": "Plane Figures_Understanding of Plane Figures_Positional Relationships Between Line Segments_Parallel", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Circles and Sectors", + "root4": "Understanding Sectors", + "full node": "Plane Figures_Understanding of Plane Figures_Circles and Sectors_Understanding Sectors", + }, + { + "root0": "Geometry and 
Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Circles and Sectors", + "root4": "Understanding Circles", + "full node": "Plane Figures_Understanding of Plane Figures_Circles and Sectors_Understanding Circles", + }, + { + "root0": "Geometry and Figures", + "root1": "Plane Figures", + "root2": "Understanding of Plane Figures", + "root3": "Observing Figures", + "root4": None, + "full node": "Plane Figures_Understanding of Plane Figures_Observing Figures", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Basic Transformations of Figures", + "root3": "Axial Symmetry", + "root4": None, + "full node": "Transformation and Motion of Figures_Basic Transformations of Figures_Axial Symmetry", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Basic Transformations of Figures", + "root3": "Translation", + "root4": None, + "full node": "Transformation and Motion of Figures_Basic Transformations of Figures_Translation", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Basic Transformations of Figures", + "root3": "Rotation", + "root4": None, + "full node": "Transformation and Motion of Figures_Basic Transformations of Figures_Rotation", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Cutting and Combining of Figures", + "root3": "Combining and Dividing Solids", + "root4": None, + "full node": "Transformation and Motion of Figures_Cutting and Combining of Figures_Combining and Dividing Solids", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Cutting and Combining of Figures", + "root3": "Combining Plane Figures", + "root4": "Division of Plane Figures", + "full node": "Transformation and Motion of Figures_Cutting and Combining of Figures_Combining 
Plane Figures_Division of Plane Figures", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Cutting and Combining of Figures", + "root3": "Combining Plane Figures", + "root4": "Combining Plane Figures", + "full node": "Transformation and Motion of Figures_Cutting and Combining of Figures_Combining Plane Figures_Combining Plane Figures", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Cutting and Combining of Figures", + "root3": "Combining Plane Figures", + "root4": "Tessellation of Figures", + "full node": "Transformation and Motion of Figures_Cutting and Combining of Figures_Combining Plane Figures_Tessellation of Figures", + }, + { + "root0": "Geometry and Figures", + "root1": "Transformation and Motion of Figures", + "root2": "Cutting and Combining of Figures", + "root3": "Combining Plane Figures", + "root4": "Folding Problems of Figures", + "full node": "Transformation and Motion of Figures_Cutting and Combining of Figures_Combining Plane Figures_Folding Problems of Figures", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Direction", + "root3": "Southeast, Southwest, Northeast, Northwest Directions", + "root4": None, + "full node": "Position and Direction_Direction_Southeast, Southwest, Northeast, Northwest Directions", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Direction", + "root3": "Cardinal Directions (East, South, West, North)", + "root4": None, + "full node": "Position and Direction_Direction_Cardinal Directions (East, South, West, North)", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Route Map", + "root3": "Determining the Positions of Objects Based on Direction, Angle, and Distance", + "root4": None, + "full node": "Position and Direction_Route Map_Determining the Positions of Objects Based on 
Direction, Angle, and Distance", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Route Map", + "root3": "Describing Simple Routes Based on Direction and Distance", + "root4": None, + "full node": "Position and Direction_Route Map_Describing Simple Routes Based on Direction and Distance", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Correspondence of Coordinates and Positions", + "root3": "Representing Positions Using Ordered Pairs", + "root4": None, + "full node": "Position and Direction_Correspondence of Coordinates and Positions_Representing Positions Using Ordered Pairs", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Correspondence of Coordinates and Positions", + "root3": "Finding Positions Based on Ordered Pairs", + "root4": None, + "full node": "Position and Direction_Correspondence of Coordinates and Positions_Finding Positions Based on Ordered Pairs", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Position", + "root3": "Front-Back Position", + "root4": None, + "full node": "Position and Direction_Position_Front-Back Position", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Position", + "root3": "Up-Down Position", + "root4": None, + "full node": "Position and Direction_Position_Up-Down Position", + }, + { + "root0": "Geometry and Figures", + "root1": "Position and Direction", + "root2": "Position", + "root3": "Left-Right Position", + "root4": None, + "full node": "Position and Direction_Position_Left-Right Position", + }, +] diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/yorn.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/yorn.py new file mode 100644 index 0000000000000000000000000000000000000000..0278e6664cbaab0578de53032e8468b1b6348b96 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/yorn.py @@ -0,0 +1,267 @@ 
+from ...smp import * + + +def AMBER_rating(data_file): + data = load(data_file) + stats = defaultdict(dict) + lt = len(data) + category_mapping = { + 'discriminative-attribute-state': 'Attribute', + 'discriminative-attribute-number': 'Attribute', + 'discriminative-attribute-action': 'Attribute', + 'discriminative-hallucination': 'Existence', + 'discriminative-relation': 'Relation', + 'relation': 'Relation' + } + + for i in range(lt): + item = data.iloc[i] + category = item['category'] + image_path = item['image_path'] + score = item['score'] + + new_category = category_mapping.get(category, category) + + if image_path not in stats[new_category]: + stats[new_category][image_path] = [] + stats[new_category][image_path].append(score) + + def acc(key): + res = stats[key] + values = [] + for val in res.values(): + values.extend(val) + return np.mean(values) * 100 + + scores = {} + for k in stats: + scores[k] = acc(k) + + scores['Avg ACC'] = np.mean(list(scores.values())) + ret = d2df(scores) + return ret + + +def MME_rating(data_file): + data = load(data_file) + stats = defaultdict(dict) + lt = len(data) + for i in range(lt): + item = data.iloc[i] + category = item['category'] + image_path = item['image_path'] + score = item['score'] + if image_path not in stats[category]: + stats[category][image_path] = [] + stats[category][image_path].append(score) + + def acc(key, mode='normal'): + res = stats[key] + values = [] + for val in res.values(): + if mode == 'normal': + values.extend(val) + elif mode == 'plus': + values.append(val[0] * val[1]) + return np.mean(values) * 100 + + scores = {} + for k in stats: + scores[k] = acc(k) + acc(k, 'plus') + + super_cates = dict( + perception=[ + 'OCR', 'artwork', 'celebrity', 'color', 'count', 'existence', + 'landmark', 'position', 'posters', 'scene' + ], + reasoning=['code_reasoning', 'commonsense_reasoning', 'numerical_calculation', 'text_translation'] + ) + + ret = {} + for sc, cate_list in super_cates.items(): + base = 0 + for c 
in cate_list: + base += scores[c] + ret[sc] = base + ret.update(scores) + ret = d2df(ret) + return ret + + +def Hallusion_rating(data_file): + def calc_fAcc(data): + res = defaultdict(list) + lt = len(data) + for i in range(lt): + line = data.iloc[i] + res[f"{line['l2-category']}_{line['set_id']}_{line['figure_id']}"].append(line['score']) + return np.mean([np.all(x) for x in res.values()]) * 100 + + def calc_qAcc(data): + res = defaultdict(list) + lt = len(data) + for i in range(lt): + line = data.iloc[i] + res[f"{line['l2-category']}_{line['set_id']}_{line['question_id']}"].append(line['score']) + return np.mean([np.all(x) for x in res.values()]) * 100 + + def calc_aAcc(data): + return np.mean(data['score']) * 100 + + data = load(data_file) + data['set_id'] = [x.split('_')[3] for x in data['index']] + data['figure_id'] = [x.split('_')[4] for x in data['index']] + data['question_id'] = [x.split('_')[5] for x in data['index']] + + res = dict(split=[], aAcc=[], fAcc=[], qAcc=[]) + res['split'].append('Overall') + res['aAcc'].append(calc_aAcc(data)) + res['fAcc'].append(calc_fAcc(data)) + res['qAcc'].append(calc_qAcc(data)) + + if 'category' in data: + cates = list(set(data['category'])) + for c in cates: + sub = data[data['category'] == c] + res['split'].append(c) + res['aAcc'].append(calc_aAcc(sub)) + res['fAcc'].append(calc_fAcc(sub)) + res['qAcc'].append(calc_qAcc(sub)) + + if 'l2-category' in data: + cates = list(set(data['l2-category'])) + for c in cates: + sub = data[data['l2-category'] == c] + res['split'].append(c) + res['aAcc'].append(calc_aAcc(sub)) + res['fAcc'].append(calc_fAcc(sub)) + res['qAcc'].append(calc_qAcc(sub)) + ret = pd.DataFrame(res) + return ret + + +def POPE_rating(data_file): + def cal_f1_score(y_true, y_pred): + tp = sum((y_true == 1) & (y_pred == 1)) + fp = sum((y_true == 0) & (y_pred == 1)) + fn = sum((y_true == 1) & (y_pred == 0)) + + precision = tp / (tp + fp) if (tp + fp) != 0 else 0 + recall = tp / (tp + fn) if (tp + fn) != 0 else 0 
+ f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0 + return f1_score, precision, recall + + data = load(data_file) + data = data.assign(category=data['category'].str.split(',')).explode('category') + data['index'] = range(len(data)) + res = dict(split=[], Overall=[], acc=[], precision=[], recall=[]) + y_true = np.array([1 if i == 'Yes' else 0 for i in data['answer']]) + y_pred = np.array([1 if i == 'Yes' else 0 for i in data['extracted']]) + f1_score, precision, recall = cal_f1_score(y_true, y_pred) + res['split'].append('Overall') + res['Overall'].append(f1_score * 100) + res['acc'].append(np.mean(data['score']) * 100) + res['precision'].append(precision * 100) + res['recall'].append(recall * 100) + + if 'category' in data: + cates = list(set(data['category'])) + cates = [c for c in cates if not pd.isna(c)] + for c in cates: + sub = data[data['category'] == c] + y_true = np.array([1 if i == 'Yes' else 0 for i in sub['answer']]) + y_pred = np.array([1 if i == 'Yes' else 0 for i in sub['extracted']]) + f1_score, precision, recall = cal_f1_score(y_true, y_pred) + res['split'].append(c) + res['Overall'].append(f1_score * 100) + res['acc'].append(np.mean(sub['score']) * 100) + res['precision'].append(precision * 100) + res['recall'].append(recall * 100) + + ret = pd.DataFrame(res) + return ret + + +def VSR_rating(data_file): + def cal_f1_score(y_true, y_pred): + tp = sum((y_true == 1) & (y_pred == 1)) + fp = sum((y_true == 0) & (y_pred == 1)) + fn = sum((y_true == 1) & (y_pred == 0)) + + precision = tp / (tp + fp) if (tp + fp) != 0 else 0 + recall = tp / (tp + fn) if (tp + fn) != 0 else 0 + f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0 + return f1_score, precision, recall + data = load(data_file) + res = {} + y_true = np.array([1 if i == 'Yes' else 0 for i in data['answer']]) + y_pred = np.array([1 if i == 'Yes' else 0 for i in data['extracted']]) + f1_score, precision, 
recall = cal_f1_score(y_true, y_pred) + res['precision'] = precision * 100 + res['recall'] = recall * 100 + res['acc'] = np.mean(data['score']) * 100 + res['f1_score'] = f1_score + ret = d2df(res) + return ret + + +def default_rating(data_file): + data = load(data_file) + res = {} + res['Overall'] = np.mean(data['score']) * 100 + if 'category' in data: + cates = list(set(data['category'])) + cates = [c for c in cates if not pd.isna(c)] + cates.sort() + for c in cates: + sub = data[data['category'] == c] + res[c] = np.mean(sub['score']) * 100 + if 'l2-category' in data: + cates = list(set(data['l2-category'])) + cates = [c for c in cates if not pd.isna(c)] + cates.sort() + for c in cates: + sub = data[data['l2-category'] == c] + res[c] = np.mean(sub['score']) * 100 + ret = d2df(res) + return ret + + +def YOrN_match_prompt(line): + tmpl = ( + 'You are an AI assistant who will help me to match an answer with two options of a question. ' + 'The options are only Yes / No. ' + 'You are provided with a question and an answer, ' + 'and you need to find which option (Yes / No) is most similar to the answer. ' + 'If the meaning of all options are significantly different from the answer, output Unknown. 
' + 'Your should output a single word among the following 3 choices: Yes, No, Unknown.\n' + 'Example 1: \n' + "Question: Is the word in this image 'Hello'?\nAnswer: The word in this image is 'Hello'.\nYour output: Yes\n" + 'Example 2: \n' + "Question: Is the word in this image 'Hello'?\n" + "Answer: The word in this image is not 'Hello'.\nYour output: No\n" + 'Example 3: \n' + 'Question: {}?\nAnswer: {}\nYour output: ' + ) + return tmpl.format(line['question'], line['prediction']) + + +def YOrN_Extraction(output): + s = output.lower() + words = process_punctuation(s).split() + if 'yes' in words and 'no' not in words: + return 'Yes' + if 'yes' not in words and 'no' in words: + return 'No' + return 'Unknown' + + +def YOrN_auxeval(model, line): + prompt = YOrN_match_prompt(line) + retry = 5 + for i in range(retry): + output = model.generate(prompt, temperature=0.5 * i) + ans = YOrN_Extraction(output) + if ans != 'Unknown': + return ans + return 'Unknown' diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/video_holmes.py b/VLMEvalKit-sudoku/vlmeval/dataset/video_holmes.py new file mode 100644 index 0000000000000000000000000000000000000000..3d6ff37b245d02760b5577bcce433a365655de76 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/video_holmes.py @@ -0,0 +1,255 @@ +from huggingface_hub import snapshot_download +from ..smp import * +from ..smp.file import get_intermediate_file_path, get_file_extension +from .video_base import VideoBaseDataset +from .utils import build_judge, DEBUG_MESSAGE + +FAIL_MSG = 'Failed to obtain answer via API.' 
+ + +def unwrap_hf_pkl(pth, suffix='.mp4'): + base_dir = os.path.join(pth, 'video_pkl/') + target_dir = os.path.join(pth, 'video/') + pickle_files = [os.path.join(base_dir, file) for file in os.listdir(base_dir)] + pickle_files.sort() + + if not os.path.exists(target_dir): + os.makedirs(target_dir, exist_ok=True) + for pickle_file in pickle_files: + with open(pickle_file, 'rb') as file: + video_data = pickle.load(file) + # For each video file in the pickle file, write its contents to a new mp4 file + for video_name, video_content in video_data.items(): + output_path = os.path.join(target_dir, f'{video_name}{suffix}') + with open(output_path, 'wb') as output_file: + output_file.write(video_content) + print('The video file has been restored and stored from the pickle file.') + else: + print('The video file already exists.') + + +class Video_Holmes(VideoBaseDataset): + + MD5 = '85bdd91f9b29a99354c23b97ab7c113c' + SYS = '' + + QUESTION_TMPL = """ + Based on the given video, reason and answer the single-choice question. Provide your reasoning between the and tags, and then give your final answer between the and tags. \ + The question is: {}. The options are: {}. 
\ + Your answer: + """ # noqa: E501 + + TYPE = 'Video-MCQ' + + def __init__(self, dataset='Video_Holmes', nframe=32, fps=-1): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.dataset_name = dataset + + @classmethod + def supported_datasets(cls): + return ['Video_Holmes'] + + def prepare_dataset(self, dataset_name='Video_Holmes', repo_id='TencentARC/Video-Holmes'): + + def check_integrity(pth): + data_file = osp.join(pth, f'{dataset_name}.tsv') + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.MD5: + return False + data = load(data_file) + for video_pth in data['video_path']: + if not osp.exists(osp.join(pth, video_pth)): + return False + return True + + cache_path = get_cache_path(repo_id) + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + def unzip_hf_zip(pth): + import zipfile + base_dir = pth + target_dir = os.path.join(pth, 'video/') + zip_files = [ + os.path.join(base_dir, file) for file in os.listdir(base_dir) + if file == "videos.zip" + ] + zip_files.sort() + + if not os.path.exists(target_dir): + os.makedirs(target_dir, exist_ok=True) + for zip_file in zip_files: + with zipfile.ZipFile(zip_file, 'r') as zip_ref: + for member in zip_ref.namelist(): + # Check if the member is a file (not a directory) + if not member.endswith('/'): + # Extract the file to the specified directory + source = zip_ref.open(member) + target = open(os.path.join(target_dir, os.path.basename(member)), 'wb') + with source, target: + target.write(source.read()) + print('The video file has been restored and stored from the zip file.') + else: + print('The video file already exists.') + + def generate_tsv(pth): + + data_file = osp.join(pth, f'{dataset_name}.tsv') + if os.path.exists(data_file) and md5(data_file) == self.MD5: + return + + with open(os.path.join(pth, 'test_Video-Holmes.json'), 'r', encoding='utf-8') as f: + data = json.load(f) + + rows = [] + + for idx, item in enumerate(data): 
+ + video_id = item.get('video ID') + options = item.get('Options', {}) + candidates = [f"{k}. {options.get(k, '')}".replace("'","") + for k in ['A', 'B', 'C', 'D', 'E', 'F'] if k in options] + row = { + 'index': idx, + 'video': video_id, + 'video_path': f'./video/{video_id}.mp4', + 'candidates': candidates, + 'question': item.get('Question', ''), + 'answer': item.get('Answer', ''), + 'question_id': item.get('Question ID', ''), + 'question_type': item.get('Question Type', ''), + 'explanation': item.get('Explanation', ''), + } + rows.append(row) + + df = pd.DataFrame(rows) + columns = ['index', 'video', 'video_path', 'candidates', + 'question', 'answer', 'question_id', 'question_type', 'explanation'] + df = df[columns] + df.to_csv(data_file, sep='\t', index=False) + print("Generate tsv file OK") + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + dataset_path = dataset_snapshot_download(dataset_id=repo_id) + else: + dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset') + + unzip_hf_zip(dataset_path) + generate_tsv(dataset_path) + + data_file = osp.join(dataset_path, f'{dataset_name}.tsv') + return dict(data_file=data_file, root=dataset_path) + + def save_video_frames(self, video, video_llm=False): + + vid_path = osp.join(self.data_root, 'video', video + '.mp4') + import decord + vid = decord.VideoReader(vid_path) + video_info = { + 'fps': vid.get_avg_fps(), + 'n_frames': len(vid), + } + if self.nframe > 0 and self.fps < 0: + step_size = len(vid) / (self.nframe + 1) + indices = [int(i * step_size) for i in range(1, self.nframe + 1)] + frame_paths = self.frame_paths(video) + elif self.fps > 0: + # not constrained by num_frames, get frames by fps + total_duration = video_info['n_frames'] / video_info['fps'] + required_frames = int(total_duration * self.fps) + step_size = video_info['fps'] / self.fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(video, len(indices)) + 
+ flag = np.all([osp.exists(p) for p in frame_paths]) + + if not flag: + lock_path = osp.splitext(vid_path)[0] + '.lock' + with portalocker.Lock(lock_path, 'w', timeout=30): + if not np.all([osp.exists(p) for p in frame_paths]): + images = [vid[i].asnumpy() for i in indices] + images = [Image.fromarray(arr) for arr in images] + for im, pth in zip(images, frame_paths): + if not osp.exists(pth): + im.save(pth) + + return frame_paths, indices, video_info + + def build_prompt(self, line, video_llm): + + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + frames, indices, video_info = self.save_video_frames(line['video'], video_llm) + + message = [dict(type='text', value=self.SYS)] + + if video_llm: + message.append(dict(type='video', value=osp.join(self.data_root, 'video', line['video'] + '.mp4'))) + else: + for im in frames: + message.append(dict(type='image', value=im)) + + text_prompt = self.QUESTION_TMPL.format(line['question'],line['candidates']) + message.append(dict(type='text', value=text_prompt)) + # print(f"Build message OK {line[1]}") + return message + + # It returns a dictionary + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + + from .utils.videoholmes import get_dimension_rating, extract_option + + assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file' # noqa: E501 + + tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl') + tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json') + score_file = get_intermediate_file_path(eval_file, '_score') + + if not osp.exists(score_file): + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125'] + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for 
evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + res = {} if not osp.exists(tmp_file) else load(tmp_file) + res = {k: v for k, v in res.items() if FAIL_MSG not in v} + + data = load(eval_file) + data_un = data[~pd.isna(data['prediction'])] + + for idx in data['index']: + ans = data.loc[data['index'] == idx, 'answer'].values[0] + pred = str(data.loc[data['index'] == idx, 'prediction'].values[0]) + + predicted_answer = extract_option(pred) + + data.loc[idx, 'score'] = int(predicted_answer == ans) + + rejected = [x for x in data['score'] if x == -1] + + print( + f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, ' + f'failed to obtain the score for another {len(rejected)} questions. ' + f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.' + ) + + dump(data, score_file) + + rating = get_dimension_rating(score_file) + dump(rating, tgt_file) + return rating diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/videomme.py b/VLMEvalKit-sudoku/vlmeval/dataset/videomme.py new file mode 100644 index 0000000000000000000000000000000000000000..84a20eeb45b574f5e625a440a5da76063ef13711 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/videomme.py @@ -0,0 +1,288 @@ +from huggingface_hub import snapshot_download +from ..smp import * +from ..smp.file import get_intermediate_file_path, get_file_extension +from .video_base import VideoBaseDataset +from .utils import build_judge, DEBUG_MESSAGE + +FAIL_MSG = 'Failed to obtain answer via API.' 
+ + +def unwrap_hf_pkl(pth, suffix='.mp4'): + base_dir = os.path.join(pth, 'video_pkl/') + target_dir = os.path.join(pth, 'video/') + pickle_files = [os.path.join(base_dir, file) for file in os.listdir(base_dir)] + pickle_files.sort() + + if not os.path.exists(target_dir): + os.makedirs(target_dir, exist_ok=True) + for pickle_file in pickle_files: + with open(pickle_file, 'rb') as file: + video_data = pickle.load(file) + # For each video file in the pickle file, write its contents to a new mp4 file + for video_name, video_content in video_data.items(): + output_path = os.path.join(target_dir, f'{video_name}{suffix}') + with open(output_path, 'wb') as output_file: + output_file.write(video_content) + print('The video file has been restored and stored from the pickle file.') + else: + print('The video file already exists.') + + +class VideoMME(VideoBaseDataset): + + MD5 = '85bdd91f9b29a99354c23b97ab7c113c' + SYS = '' + + FRAMES_TMPL_NOSUB = """ +These are the frames of a video. \ +Select the best answer to the following multiple-choice question based on the video. \ +Respond with only the letter (A, B, C, or D) of the correct option. +""" + + FRAMES_TMPL_SUB = """ +These are the frames of a video. \ +This video's subtitles are listed below: +{} +Select the best answer to the following multiple-choice question based on the video. \ +Respond with only the letter (A, B, C, or D) of the correct option. 
+""" + + TYPE = 'Video-MCQ' + + def __init__(self, dataset='Video-MME', use_subtitle=False, nframe=0, fps=-1): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.dataset_name = dataset + + @classmethod + def supported_datasets(cls): + return ['Video-MME'] + + def prepare_dataset(self, dataset_name='Video-MME', repo_id='lmms-lab/Video-MME'): + + def check_integrity(pth): + data_file = osp.join(pth, f'{dataset_name}.tsv') + + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.MD5: + return False + data = load(data_file) + for video_pth in data['video_path']: + if not osp.exists(osp.join(pth, video_pth)): + return False + return True + + cache_path = get_cache_path(repo_id) + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + + def unzip_hf_zip(pth): + import zipfile + base_dir = pth + target_dir = os.path.join(pth, 'video/') + zip_files = [ + os.path.join(base_dir, file) for file in os.listdir(base_dir) + if file.endswith('.zip') and file.startswith('video') + ] + zip_files.sort() + + if not os.path.exists(target_dir): + os.makedirs(target_dir, exist_ok=True) + for zip_file in zip_files: + with zipfile.ZipFile(zip_file, 'r') as zip_ref: + for member in zip_ref.namelist(): + # Check if the member is a file (not a directory) + if not member.endswith('/'): + # Extract the file to the specified directory + source = zip_ref.open(member) + target = open(os.path.join(target_dir, os.path.basename(member)), 'wb') + with source, target: + target.write(source.read()) + print('The video file has been restored and stored from the zip file.') + else: + print('The video file already exists.') + + subtitle_zip_file = os.path.join(base_dir, 'subtitle.zip') + subtitle_target_dir = os.path.join(base_dir, 'subtitle') + + if not os.path.exists(subtitle_target_dir): + os.makedirs(subtitle_target_dir, exist_ok=True) + with zipfile.ZipFile(subtitle_zip_file, 'r') as 
zip_ref: + for member in zip_ref.namelist(): + # Check if the member is a file (not a directory) + if not member.endswith('/'): + # Extract the file to the specified directory + source = zip_ref.open(member) + target = open(os.path.join(subtitle_target_dir, os.path.basename(member)), 'wb') + with source, target: + target.write(source.read()) + print('The subtitle file has been restored and stored from the zip file.') + else: + print('The subtitle file already exists.') + + def generate_tsv(pth): + + data_file = osp.join(pth, f'{dataset_name}.tsv') + if os.path.exists(data_file) and md5(data_file) == self.MD5: + return + + data_file = pd.read_parquet(os.path.join(pth, 'videomme/test-00000-of-00001.parquet')) + data_file = data_file.assign(index=range(len(data_file))) + data_file['video'] = data_file['videoID'] + data_file['video_path'] = data_file['videoID'].apply(lambda x: f'./video/{x}.mp4') + data_file['subtitle_path'] = data_file['videoID'].apply(lambda x: f'./subtitle/{x}.srt') + data_file['candidates'] = data_file['options'].apply(lambda x: x.tolist()) + + data_file = data_file[['index', 'video', 'video_path', 'duration', 'domain', 'candidates', + 'sub_category', 'task_type', 'subtitle_path', 'question', 'answer']] + + data_file.to_csv(osp.join(pth, f'{dataset_name}.tsv'), sep='\t', index=False) + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + dataset_path = dataset_snapshot_download(dataset_id=repo_id) + else: + dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset') + unzip_hf_zip(dataset_path) + generate_tsv(dataset_path) + + data_file = osp.join(dataset_path, f'{dataset_name}.tsv') + + return dict(data_file=data_file, root=dataset_path) + + def save_video_frames(self, video, video_llm=False): + + vid_path = osp.join(self.data_root, 'video', video + '.mp4') + import decord + vid = decord.VideoReader(vid_path) + video_info = { + 'fps': vid.get_avg_fps(), + 'n_frames': len(vid), + } + if self.nframe > 0 and 
self.fps < 0: + step_size = len(vid) / (self.nframe + 1) + indices = [int(i * step_size) for i in range(1, self.nframe + 1)] + frame_paths = self.frame_paths(video) + elif self.fps > 0: + # not constrained by num_frames, get frames by fps + total_duration = video_info['n_frames'] / video_info['fps'] + required_frames = int(total_duration * self.fps) + step_size = video_info['fps'] / self.fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(video, len(indices)) + + flag = np.all([osp.exists(p) for p in frame_paths]) + + if not flag: + lock_path = osp.splitext(vid_path)[0] + '.lock' + with portalocker.Lock(lock_path, 'w', timeout=30): + if not np.all([osp.exists(p) for p in frame_paths]): + images = [vid[i].asnumpy() for i in indices] + images = [Image.fromarray(arr) for arr in images] + for im, pth in zip(images, frame_paths): + if not osp.exists(pth): + im.save(pth) + + return frame_paths, indices, video_info + + def build_prompt(self, line, video_llm): + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + frames, indices, video_info = self.save_video_frames(line['video'], video_llm) + + if self.use_subtitle and os.path.exists(osp.join(self.data_root, line['subtitle_path'])): + import pysubs2 + subs = pysubs2.load(osp.join(self.data_root, line['subtitle_path']), encoding='utf-8') + subtitles = [] + + for seleced_frame_id in indices: + sub_text = '' + cur_time = pysubs2.make_time(fps=video_info['fps'], frames=seleced_frame_id) + for sub in subs: + if sub.start < cur_time and sub.end > cur_time: + sub_text = sub.text.replace('\\N', ' ') + break + if sub_text.strip(): + subtitles.append(sub_text) + subtitles = '\n'.join(subtitles) + else: + subtitles = '' + + message = [dict(type='text', value=self.SYS)] + if video_llm: + message.append(dict(type='video', value=osp.join(self.data_root, 'video', line['video'] + '.mp4'))) + else: + for im in frames: + message.append(dict(type='image', 
value=im)) + + text_prompt = self.FRAMES_TMPL_NOSUB if not self.use_subtitle else self.FRAMES_TMPL_SUB.format(subtitles) + message.append(dict(type='text', value=text_prompt)) + line['question'] += '\n' + '\n'.join(eval(line['candidates'])) + prompt = 'Question: {}\nAnswer: '.format(line['question']) + message.append(dict(type='text', value=prompt)) + return message + + # It returns a dictionary + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.videomme import get_dimension_rating, extract_characters_regex, extract_option + + assert get_file_extension(eval_file) in ['xlsx', 'json', 'tsv'], 'data file should be an supported format (xlsx/json/tsv) file' # noqa: E501 + + tmp_file = get_intermediate_file_path(eval_file, '_tmp', 'pkl') + tgt_file = get_intermediate_file_path(eval_file, '_rating', 'json') + score_file = get_intermediate_file_path(eval_file, '_score') + + if not osp.exists(score_file): + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125'] + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + res = {} if not osp.exists(tmp_file) else load(tmp_file) + res = {k: v for k, v in res.items() if FAIL_MSG not in v} + + data = load(eval_file) + data_un = data[~pd.isna(data['prediction'])] + + for idx in data['index']: + ans = data.loc[data['index'] == idx, 'answer'].values[0] + pred = str(data.loc[data['index'] == idx, 'prediction'].values[0]) + + if extract_characters_regex(pred) == '': + extract_pred = extract_option( + model, + data.loc[data['index'] == idx].to_dict(orient='records')[0], + 'Video-MME' + ) + data.loc[data['index'] == 
idx, 'score'] = int(extract_pred == ans) + else: + data.loc[data['index'] == idx, 'score'] = int(extract_characters_regex(pred) == ans) + + rejected = [x for x in data['score'] if x == -1] + + print( + f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, ' + f'failed to obtain the score for another {len(rejected)} questions. ' + f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.' + ) + + dump(data, score_file) + + rating = get_dimension_rating(score_file) + dump(rating, tgt_file) + return rating diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/vlmbias.py b/VLMEvalKit-sudoku/vlmeval/dataset/vlmbias.py new file mode 100644 index 0000000000000000000000000000000000000000..45e0ebb9b8729392497829c4b48774efa0e139d4 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/vlmbias.py @@ -0,0 +1,55 @@ +from ..smp import * +from ..utils import track_progress_rich +from .image_vqa import ImageVQADataset +from .utils.omni_verifier import OmniVerifier +from .utils.multiple_choice import report_acc + + +def VLMBias_auxeval(verifier, pred, gt): + return verifier.verify(pred, gt) + + +class VLMBias(ImageVQADataset): + + DATASET_URL = {'VLMBias': 'https://opencompass.openxlab.space/utils/VLMEval/VLMBias.tsv'} + DATASET_MD5 = {'VLMBias': '23d0119c89243954e81f41a11a2ef347'} + + def evaluate(self, eval_file, **judge_kwargs): + model = judge_kwargs.pop('model', 'gpt-4o') + storage = get_intermediate_file_path(eval_file, f'_{model}') + tmp_file = get_intermediate_file_path(eval_file, f'_{model}_tmp') + nproc = judge_kwargs.pop('nproc', 16) + + if not osp.exists(storage): + data = load(eval_file) + assert 'answer' in data and 'prediction' in data + verifier = OmniVerifier(tmpl='brace', judge=model, retry=3, timeout=60, lower_case=True, rule_only=False) + if not verifier.rule_only: + assert verifier.judge.working(), 'OmniVerifier should have a working API. 
' + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(verifier, line['prediction'], line['answer']) for line in lines] + indices = [line['index'] for line in lines] + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + if len(indices): + new_results = track_progress_rich( + VLMBias_auxeval, tups, + nproc=nproc, chunksize=nproc, keys=indices, save=tmp_file, + ) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k][0] == v[0] and ans[k][1] == v[1] + data['hit'] = [ans[idx][0] for idx in data['index']] + data['log'] = [ans[idx][1] for idx in data['index']] + dump(data, storage) + + data = load(storage) + acc = report_acc(data) + score_file = get_intermediate_file_path(eval_file, '_acc') + dump(acc, score_file) + return acc diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/wildvision.py b/VLMEvalKit-sudoku/vlmeval/dataset/wildvision.py new file mode 100644 index 0000000000000000000000000000000000000000..3552a0b8eaa22228c56cf40c43bb9dacfa9cf090 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/wildvision.py @@ -0,0 +1,222 @@ +import re +from functools import partial + +from .image_base import ImageBaseDataset +from .utils import build_judge, DEBUG_MESSAGE +from ..smp import * +from ..smp.file import get_intermediate_file_path +from ..utils import track_progress_rich + + +SYSTEM_PROMPT = """\ +Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user \ +prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate \ +which assistant's answer is better. + +Begin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any \ +answers. + +When evaluating the assistants' answers, compare both assistants' answers with your answer. 
\ +You must identify and correct any mistakes or inaccurate information. + +Then consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly \ +responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one \ +interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than \ +providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate \ +to what is being asked. Concise means the response is clear and not verbose or excessive. + +Then consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing \ +important information in the assistants' answers that would be beneficial to include when responding to the user \ +prompt. + +After providing your explanation, you must output only one of the following choices as your final verdict with a label: + +1. Assistant A is significantly better: [[A>>B]] +2. Assistant A is slightly better: [[A>B]] +3. Tie, relatively the same: [[A=B]] +4. Assistant B is slightly better: [[B>A]] +5. 
Assistant B is significantly better: [[B>>A]] + +Example output: "My final verdict is tie: [[A=B]]".\ +""" + + +PROMPT_TEMPLATE = """\ +"<|User Prompt|>\n{question} + +<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|> + +<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|> +""" + + +REGEX_PATTERN = re.compile("\[\[([AB<>=]+)\]\]") # noqa: W605 + + +def get_score(judgement, pattern=REGEX_PATTERN): + matches = pattern.findall(judgement) + matches = [m for m in matches if m != ""] + if len(set(matches)) == 0: + return None, True + elif len(set(matches)) == 1: + return matches[0].strip("\n"), False + else: + return None, True + + +def WildVision_auxeval(model, line): + config = dict(question=line['question'], answer_1=line['A'], answer_2=line['B']) + prompt = PROMPT_TEMPLATE.format(**config) + + prefix = 'data:image/jpeg;base64,' + img = prefix + line['image'] + + messages = [ + dict(type='text', value=prompt), + dict(type='image', value=img) + ] + + retry = 2 + while retry: + resp = model.generate(messages) + score, try_again = get_score(resp) + if not try_again: + break + retry -= 1 + + if score is None: + return 'Unknown' + return score + + +class WildVision(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'WildVision': 'https://opencompass.openxlab.space/utils/VLMEval/WildVision.tsv' + } + DATASET_MD5 = {'WildVision': 'b38f80156d49411c594772866b0d0b52'} + + score_map = { + 'A>>B': -2, + 'A>B': -1, + 'A=B': 0, + 'B>A': 1, + 'B>>A': 2 + } + + # Given one data record, return the built prompt (a multi-modal message), can override + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = 
[dict(type='image', value=tgt_path)] + # WildVision adopts text first + msgs = [dict(type='text', value=question)] + msgs + return msgs + + @classmethod + def gen_eval_base(self, eval_file, b64_map): + data = load(eval_file) + data['B'] = data.pop('prediction') + data['A'] = data.pop('claude3_sonnet') + data['image'] = [b64_map[x] for x in data['index']] + return data + # rev = cp.deepcopy(data) + # rev['A'] = data['B'] + # rev['B'] = data['A'] + # rev['index'] = [x + '_rev' for x in data['index']] + # return pd.concat([data, rev], ignore_index=True) + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + # We adopt pairwise evaluation (twice for a pair) for this dataset + model = judge_kwargs['model'] + storage = get_intermediate_file_path(eval_file, f'_{model}') + score_file = get_intermediate_file_path(eval_file, f'_{model}_score', 'csv') + tmp_file = get_intermediate_file_path(eval_file, f'_{model}', 'pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage): + raw_data = WildVision('WildVision').data + b64_map = {x: y for x, y in zip(raw_data['index'], raw_data['image'])} + data = self.gen_eval_base(eval_file, b64_map) + + judge_kwargs['system_prompt'] = SYSTEM_PROMPT + judge_kwargs['temperature'] = 0 + judge_kwargs['img_detail'] = 'high' + judge_kwargs['timeout'] = 300 + model = build_judge(max_tokens=4096, **judge_kwargs) + + assert model.working(), ('WildVision evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = load(tmp_file) if osp.exists(tmp_file) else {} + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + WildVision_auxeval, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = 
load(tmp_file) + for k, v in zip(indices, new_results): + ans[k] = v + + data['score'] = [ans[idx] for idx in data['index']] + data.pop('image') + dump(data, storage) + + data = load(storage) + lt = len(data) + + scores = defaultdict(lambda: 0) + for i in range(lt): + item = data.iloc[i] + if item['score'] not in self.score_map: + score = 0 + else: + score = self.score_map[item['score']] + if '_rev' in item['index']: + score = -score + scores[score] += 1 + name_map = { + 2: 'Much Better', + 1: 'Better', + 0: 'Tie', + -1: 'Worse', + -2: 'Much Worse' + } + scores = {name_map[k]: v for k, v in scores.items()} + much_better = scores.get('Much Better', 0) + better = scores.get('Better', 0) + worse = scores.get('Worse', 0) + much_worse = scores.get('Much Worse', 0) + scores['Reward'] = ( + 100 * much_better + 50 * better - 50 * worse - 100 * much_worse + ) / lt + scores['Win Rate'] = (better + much_better) / lt + scores = {k: [v] for k, v in scores.items()} + scores = pd.DataFrame(scores) + dump(scores, score_file) + return scores diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/aki.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/aki.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea9eceb16450336eb07a967e34f1fe17eb74fb93 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/aki.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/deepseek_vl2.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/deepseek_vl2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe25cd1bbef40456c09e9707dc94e4941282c939 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/deepseek_vl2.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/flash_vl.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/flash_vl.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5171ca61f7be4db2fa501b697a2e32082670c1b8 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/flash_vl.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/kosmos.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/kosmos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44480673e497059e379c06e422200d121e07cdd1 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/kosmos.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llama4.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llama4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcedafad035749093d22b2980132a2c6d707a41a Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/llama4.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mantis.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mantis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..733b80d3cf66ff286a99def1cd94e5b3bcf822aa Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mantis.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/minigpt4.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/minigpt4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f297994b84577bde163e193ae23cb5ec5c40b240 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/minigpt4.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mmalaya.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mmalaya.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..871786fcd2221ca3ef0610200d609722071eb1e8 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/mmalaya.cpython-310.pyc differ diff --git 
a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/omnilmm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/omnilmm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88a00ea729f6161e1bcd51e27c636067f8ee46aa Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/omnilmm.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/phi4_multimodal.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/phi4_multimodal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef5bb7e15dc212bf37577f11a1eaf26816999e7a Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/phi4_multimodal.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/qh_360vl.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/qh_360vl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f1d3cecf981e7bf8a58b0c0d7e869c12bea7521 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/qh_360vl.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/rbdash.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/rbdash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c46549ac19f8fa7cb3775e1388858b09a483150 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/rbdash.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/ross.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/ross.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e6aaf3f1012bbd18409c93a474bc1656ab4bf0b Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/ross.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/ovis/ovis.py b/VLMEvalKit-sudoku/vlmeval/vlm/ovis/ovis.py new file mode 100644 index 
0000000000000000000000000000000000000000..fc475037fd4a2a99c5e0d8256d859c33dd9ad7c1 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/vlm/ovis/ovis.py @@ -0,0 +1,722 @@ +import torch +from transformers import AutoModelForCausalLM + +from ..base import BaseModel +from ...dataset import DATASET_TYPE, DATASET_MODALITY +from ...smp import * + + +class Ovis(BaseModel): + INSTALL_REQ = False + INTERLEAVE = True + + def __init__(self, model_path='AIDC-AI/Ovis1.5-Llama3-8B', **kwargs): + assert model_path is not None + # Recommend to install `transformers==4.43.2` and `torch==2.1.2`. + self.model_path = model_path + self.device = torch.cuda.current_device() + self.dtype = torch.bfloat16 + self.model = AutoModelForCausalLM.from_pretrained( + self.model_path, + torch_dtype=self.dtype, + multimodal_max_length=8192, + trust_remote_code=True + ) + self.model = self.model.eval().to(device=self.device) + self.eos_token_id = self.model.generation_config.eos_token_id + self.text_tokenizer = self.model.get_text_tokenizer() + self.pad_token_id = self.text_tokenizer.pad_token_id + self.visual_tokenizer = self.model.get_visual_tokenizer() + self.conversation_formatter = self.model.get_conversation_formatter() + self.image_placeholder = '' + self.gen_kwargs = dict( + max_new_tokens=1024, + do_sample=False, + top_p=None, + top_k=None, + temperature=None, + repetition_penalty=None, + eos_token_id=self.eos_token_id, + pad_token_id=self.pad_token_id, + use_cache=True + ) + self.gen_kwargs.update(kwargs) + + def use_custom_prompt(self, dataset): + if DATASET_TYPE(dataset) == 'Y/N' or DATASET_TYPE(dataset) == 'MCQ': + return True + return False + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert isinstance(dataset, str) + tgt_path = self.dump_image(line, dataset) + + if DATASET_TYPE(dataset) == 'Y/N': + prompt = self.build_yorn_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset) + else: 
+ raise RuntimeError(f'Invalid dataset type: {DATASET_TYPE(dataset)}') + message = [dict(type='text', value=prompt)] + message.extend([dict(type='image', value=s) for s in tgt_path]) + + # interleave dataset + if dataset.startswith('MMMU_'): + from ... import MMMUDataset + message = MMMUDataset.split_MMMU(message) + + return message + + def build_yorn_prompt(self, line, dataset=None): + prompt = line['question'] + if listinstr(['HallusionBench'], dataset): + prompt += ' Please answer yes or no.' + prompt += '\n请用单个词或短语回答问题。' if cn_string( + prompt) else '\nAnswer the question using a single word or phrase.' + return prompt + + def build_multi_choice_prompt(self, line, dataset=None): + question = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + question = hint + '\n' + question + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + question += f'\n{key}. {item}' + prompt = question + + if len(options): + prompt += '\n请直接回答选项字母。' if cn_string( + prompt) else "\nAnswer with the option's letter from the given choices directly." + else: + prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.' 
+ + return prompt + + def generate_inner(self, message, dataset=None): + prompt, input_ids, attention_mask, pixel_values = self.prepare_inputs(message) + output_ids = self.model.generate( + input_ids, + pixel_values=pixel_values, + attention_mask=attention_mask, + **self.gen_kwargs + ) + response = self.text_tokenizer.decode(output_ids[0], skip_special_tokens=True).strip() + + return response + + def prepare_inputs(self, message): + # build query + images = [x['value'] for x in message if x['type'] == 'image'] + texts = [x['value'] for x in message if x['type'] == 'text'] + if len(images) == 0: + query = '\n'.join(texts) + elif len(images) == 1 and len(texts) == 1: + query = self.image_placeholder + '\n' + texts[0] + else: # interleave sample + chunks = [x['value'] if x['type'] == 'text' else self.image_placeholder for x in message] + query = '\n'.join(chunks) + + # format conversation + prompt, input_ids = self.conversation_formatter.format_query(query) + attention_mask = torch.ne(input_ids, self.text_tokenizer.pad_token_id) + input_ids = input_ids.unsqueeze(0).to(device=self.device) + attention_mask = attention_mask.unsqueeze(0).to(device=self.device) + + # preprocess images + if len(images) == 0: + pixel_values = [None] + else: + preprocessed_images = [self.visual_tokenizer.preprocess_image(Image.open(image)) for image in images] + pixel_values = [torch.cat(preprocessed_images, dim=0).to(device=self.device, dtype=self.dtype)] + + return prompt, input_ids, attention_mask, pixel_values + + +class Ovis1_6(BaseModel): + INSTALL_REQ = False + INTERLEAVE = True + + def __init__(self, model_path='AIDC-AI/Ovis1.6-Gemma2-9B', **kwargs): + assert model_path is not None + # Recommend to install `python=3.10`, `transformers==4.44.2`, `torch==2.2.0`, and `numpy==1.24.3` + self.model_path = model_path + self.device = torch.cuda.current_device() + self.dtype = torch.bfloat16 + self.model = AutoModelForCausalLM.from_pretrained( + self.model_path, + torch_dtype=self.dtype, + 
multimodal_max_length=8192, + trust_remote_code=True + ) + self.model = self.model.eval().to(device=self.device) + self.eos_token_id = self.model.generation_config.eos_token_id + self.text_tokenizer = self.model.get_text_tokenizer() + self.pad_token_id = self.text_tokenizer.pad_token_id + self.visual_tokenizer = self.model.get_visual_tokenizer() + self.max_partition = 9 + self.image_placeholder = '' + self.gen_kwargs = dict( + max_new_tokens=1024, + do_sample=False, + top_p=None, + top_k=None, + temperature=None, + repetition_penalty=None, + eos_token_id=self.eos_token_id, + pad_token_id=self.pad_token_id, + use_cache=True + ) + self.gen_kwargs.update(kwargs) + + def use_custom_prompt(self, dataset): + if DATASET_TYPE(dataset) == 'Y/N' or DATASET_TYPE(dataset) == 'MCQ': + return True + return False + + def build_yorn_prompt(self, line, dataset=None): + prompt = line['question'] + '\nAnswer the question using a single word or phrase.' + return prompt + + def build_multi_choice_prompt(self, line, dataset=None): + question = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + question = hint + '\n' + question + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + question += f'\n{key}. {item}' + prompt = question + + if len(options): + prompt += "\nAnswer with the option's letter from the given choices directly." 
+ + return prompt + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert isinstance(dataset, str) + tgt_path = self.dump_image(line, dataset) + + if DATASET_TYPE(dataset) == 'Y/N': + prompt = self.build_yorn_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset) + else: + raise RuntimeError(f'Invalid dataset type: {DATASET_TYPE(dataset)}') + message = [dict(type='text', value=prompt)] + message.extend([dict(type='image', value=s) for s in tgt_path]) + + # interleave dataset + if dataset.startswith('MMMU_'): + from ... import MMMUDataset + message = MMMUDataset.split_MMMU(message) + + return message + + def generate_inner(self, message, dataset=None): + prompt, input_ids, attention_mask, pixel_values = self.prepare_inputs(message) + output_ids = self.model.generate( + input_ids, + pixel_values=pixel_values, + attention_mask=attention_mask, + **self.gen_kwargs + ) + response = self.text_tokenizer.decode(output_ids[0], skip_special_tokens=True) + + return response + + def prepare_inputs(self, message): + # build query + images = [x['value'] for x in message if x['type'] == 'image'] + texts = [x['value'] for x in message if x['type'] == 'text'] + if len(images) == 0: + query = '\n'.join(texts) + elif len(images) == 1 and len(texts) == 1: + query = self.image_placeholder + '\n' + texts[0] + else: # interleaved sample + chunks = [x['value'] if x['type'] == 'text' else self.image_placeholder for x in message] + query = '\n'.join(chunks) + + # preprocess inputs + prompt, input_ids, pixel_values = self.model.preprocess_inputs( + query, [Image.open(image) for image in images], max_partition=self.max_partition + ) + + # move to self.device + attention_mask = torch.ne(input_ids, self.text_tokenizer.pad_token_id) + input_ids = input_ids.unsqueeze(0).to(device=self.device) + attention_mask = attention_mask.unsqueeze(0).to(device=self.device) + pixel_values = [ + 
pixel_values.to(device=self.device, dtype=self.dtype) if pixel_values is not None else None + ] + + return prompt, input_ids, attention_mask, pixel_values + + +class Ovis1_6_Plus(Ovis1_6): + # Recommend to install `python=3.10`, `transformers==4.46.2`, `torch==2.4.0`, and `numpy==1.25.0` + + def build_mmmu_prompt(self, line, dataset: str) -> list[dict[str, str]]: + import string + import pandas as pd + + question = line['question'] + options = {cand: line[cand] for cand in string.ascii_uppercase if cand in line and not pd.isna(line[cand])} + options_prompt = 'Options:\n' + for key, item in options.items(): + options_prompt += f'{key}. {item}\n' + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + prompt = '' + if hint is not None: + prompt += f'Hint: {hint}\n' + prompt += f'Question: {question}\n' + if len(options): + prompt += options_prompt + prompt += 'Please select the correct answer from the options above.' + prompt = prompt.rstrip() + return prompt + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert isinstance(dataset, str) + tgt_path = self.dump_image(line, dataset) + + if dataset.startswith('MMMU_'): + prompt = self.build_mmmu_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'Y/N': + prompt = self.build_yorn_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset) + else: + raise RuntimeError(f'Invalid dataset type: {DATASET_TYPE(dataset)}') + + message = [dict(type='image', value=s) for s in tgt_path] + [dict(type='text', value=prompt)] + + return message + + +class Ovis2(BaseModel): + INSTALL_REQ = False + INTERLEAVE = True + SIZE_DICT = { + (24, 896): '1B', # (num_hidden_layers, hidden_size) + (28, 1536): '2B', + (36, 2048): '4B', + (28, 3584): '8B', + (48, 5120): '16B', + (64, 5120): '34B' + } + + def __init__(self, model_path='AIDC-AI/Ovis2-8B', **kwargs): + assert model_path is not None + # Recommend to install 
`python=3.10`, `transformers==4.46.2`, `torch==2.4.0`, and `numpy==1.25.0` + self.model_path = model_path + self.device = torch.cuda.current_device() + self.dtype = torch.bfloat16 + self.model = AutoModelForCausalLM.from_pretrained( + self.model_path, + torch_dtype=self.dtype, + multimodal_max_length=32768, + trust_remote_code=True + ) + self.size = self.SIZE_DICT[ + (self.model.config.llm_config.num_hidden_layers, self.model.config.llm_config.hidden_size)] + self.model = self.model.eval().to(device=self.device) + self.eos_token_id = self.model.generation_config.eos_token_id + self.text_tokenizer = self.model.get_text_tokenizer() + self.pad_token_id = self.text_tokenizer.pad_token_id + self.visual_tokenizer = self.model.get_visual_tokenizer() + self.image_placeholder = '' + self.gen_kwargs = dict( + max_new_tokens=1024, + do_sample=False, + top_p=None, + top_k=None, + temperature=None, + repetition_penalty=None, + eos_token_id=self.eos_token_id, + pad_token_id=self.pad_token_id, + use_cache=True + ) + self.use_cot = { + '1B': {'MathVerse', 'MathVision'}, + '2B': {'MMVet', 'MMStar', 'MathVerse', 'MathVision'}, + '4B': {'MathVerse', 'MathVision'}, + '8B': {'MMVet', 'MMStar', 'MMMU', 'MathVista', 'MathVerse', 'MathVision'}, + '16B': {'MMVet', 'MMStar', 'MMMU', 'MathVista', 'MathVerse', 'MathVision'}, + '34B': {'MMVet', 'MMStar', 'MMMU', 'MathVista', 'MathVerse', 'MathVision'} + } + self.frame_selector = None + if kwargs.pop("frame_selection", False): + from .utils.mdp3 import MDP3 + self.frame_selector = MDP3( + n_selection=int(kwargs.pop("n_frames", 32)), + visual_encoder_name_or_path=kwargs.pop("frame_selection_vlm", "google/siglip-so400m-patch14-384"), + device=f"cuda:{self.device}" + ) + self.gen_kwargs.update(kwargs) + + def use_custom_prompt(self, dataset): + if any(dataset.startswith(prefix) for prefix in ['MMVet', 'MathVista', 'MathVerse', 'MathVision']): + return True + if DATASET_TYPE(dataset) == 'Y/N' or DATASET_TYPE(dataset) == 'MCQ': + return True + 
return False + + def build_yorn_prompt(self, line, dataset=None): + prompt = line['question'] + if listinstr(['HallusionBench'], dataset) and self.size == '34B': + prompt += ' Please answer yes or no.' + prompt += '\nAnswer the question using a single word or phrase.' + return prompt + + def build_multi_choice_prompt(self, line, dataset=None, use_cot=False): + prompt = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + prompt = hint + '\n' + prompt + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + prompt += f'\n{key}. {item}' + + if len(options): + if use_cot: + prompt += "\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution." + else: + prompt += "\nAnswer with the option's letter from the given choices directly." + + return prompt + + def build_mmvet_prompt(self, line, dataset=None, use_cot=False): + prompt = line['question'] + if use_cot: + prompt += "\nProvide a step-by-step solution to the problem carefully." + return prompt + + def build_math_prompt(self, line, dataset=None, use_cot=False): + prompt = line['question'] + if use_cot: + prompt += "\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution." 
+ return prompt + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert isinstance(dataset, str) + tgt_path = self.dump_image(line, dataset) + + use_cot = any(dataset.startswith(prefix) for prefix in self.use_cot[self.size]) + + if dataset == 'MMVet': + prompt = self.build_mmvet_prompt(line, dataset, use_cot) + elif any(dataset.startswith(prefix) for prefix in ('MathVista', 'MathVerse', 'MathVision')): + prompt = self.build_math_prompt(line, dataset, use_cot) + elif DATASET_TYPE(dataset) == 'Y/N': + prompt = self.build_yorn_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset, use_cot) + else: + raise RuntimeError(f'Invalid dataset type: {DATASET_TYPE(dataset)}') + + message = [dict(type='image', value=s) for s in tgt_path] + [dict(type='text', value=prompt)] + + # interleave dataset + if dataset.startswith('MMMU_'): + from ... import MMMUDataset + message = MMMUDataset.split_MMMU(message) + + return message + + def generate_inner(self, message, dataset=None): + def _extract_answer(text): + answer_index = text.lower().find('the answer is') + if answer_index != -1: + answer_index += len('the answer is') + answer = text[answer_index:].lstrip(':').strip() + else: + answer = text + return answer + + # DynaMath + if dataset == 'DynaMath' and self.size == '34B': + message[-1]['value'] += "\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution." + + prompt, input_ids, attention_mask, pixel_values, max_partition = self.prepare_inputs(message, dataset) + output_ids = self.model.generate( + input_ids, + pixel_values=pixel_values, + attention_mask=attention_mask, + **self.gen_kwargs + ) + response = self.text_tokenizer.decode(output_ids[0], skip_special_tokens=True) + + if "conclude with 'the answer is' followed by the final solution." 
in prompt: + response = _extract_answer(response) + + return response + + def prepare_inputs(self, message, dataset=None): + # build query + images = [x['value'] for x in message if x['type'] == 'image'] + texts = [x['value'] for x in message if x['type'] == 'text'] + if DATASET_MODALITY(dataset) == 'VIDEO': # video inputs + chunks = [self.image_placeholder for x in message if x['type'] != 'text'] + chunks += [x['value'].strip() for x in message if x['type'] == 'text' and x['value'] != ''] + query = '\n'.join(chunks) + elif len(images) == 0: # text-only inputs + query = '\n'.join(texts) + elif len(images) == 1 and len(texts) == 1: # single-image inputs + query = self.image_placeholder + '\n' + texts[0] + else: # interleaved inputs + chunks = [x['value'].strip() if x['type'] == 'text' else self.image_placeholder for x in message] + query = '\n'.join(chunks) + + # preprocess inputs + if DATASET_MODALITY(dataset) == 'VIDEO': + max_partition = 1 + elif (dataset != None) and any( + dataset.startswith(prefix) for prefix in + ('HallusionBench', 'TextVQA', 'ChartQA', 'OCRBench', 'InfoVQA', 'DocVQA', 'MTVQA')): + max_partition = 12 + elif len(images) > 1: + max_partition = max(1, 12 // len(images)) + else: + max_partition = 9 + + prompt, input_ids, pixel_values = self.model.preprocess_inputs( + query, [Image.open(image) for image in images], max_partition=max_partition, frame_selector=self.frame_selector + ) + + # move to self.device + attention_mask = torch.ne(input_ids, self.text_tokenizer.pad_token_id) + input_ids = input_ids.unsqueeze(0).to(device=self.device) + attention_mask = attention_mask.unsqueeze(0).to(device=self.device) + pixel_values = [ + pixel_values.to(device=self.device, dtype=self.dtype) if pixel_values is not None else None + ] + + return prompt, input_ids, attention_mask, pixel_values, max_partition + + +class OvisU1(BaseModel): + INSTALL_REQ = False + INTERLEAVE = True + + def __init__(self, model_path='AIDC-AI/Ovis-U1-3B', **kwargs): + assert 
model_path is not None + # Recommend to install `transformers==4.51.3`, `torch==2.4.0`, and `numpy==1.24.3` + self.model_path = model_path + self.device = torch.cuda.current_device() + self.dtype = torch.bfloat16 + + self.model = AutoModelForCausalLM.from_pretrained( + self.model_path, + torch_dtype=self.dtype, + multimodal_max_length=32768, + trust_remote_code=True + ) + self.model = self.model.eval().to(device=self.device) + self.text_tokenizer = self.model.get_text_tokenizer() + self.pad_token_id = self.text_tokenizer.pad_token_id + self.eos_token_id = self.text_tokenizer.eos_token_id + self.visual_tokenizer = self.model.get_visual_tokenizer() + self.image_placeholder = '' + self.gen_kwargs = dict( + max_new_tokens=1024, + do_sample=False, + top_p=None, + top_k=None, + temperature=None, + repetition_penalty=None, + eos_token_id=self.eos_token_id, + pad_token_id=self.pad_token_id, + use_cache=True + ) + self.min_pixels = 200704 # 448*448 + self.max_pixels = 2408448 # 1344*1792 + self.frame_selector = None + if kwargs.pop("frame_selection", False): + from .utils.mdp3 import MDP3 + self.frame_selector = MDP3( + n_selection=int(kwargs.pop("n_frames", 32)), + visual_encoder_name_or_path=kwargs.pop("frame_selection_vlm", "google/siglip-so400m-patch14-384"), + device=f"cuda:{self.device}" + ) + self.gen_kwargs.update(kwargs) + self.use_cot = {'MMMU'} + + def use_custom_prompt(self, dataset): + if any(dataset.startswith(prefix) for prefix in ['MMVet', 'MathVista', 'MathVerse', 'MathVision']): + return True + if DATASET_TYPE(dataset) == 'Y/N' or DATASET_TYPE(dataset) == 'MCQ': + return True + return False + + def build_yorn_prompt(self, line, dataset=None): + prompt = line['question'] + if listinstr(['HallusionBench'], dataset): # and self.size == '34B': + prompt += ' Please answer yes or no.' + prompt += '\nAnswer the question using a single word or phrase.' 
+ return prompt + + def build_multi_choice_prompt(self, line, dataset=None, use_cot=False): + prompt = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + prompt = hint + '\n' + prompt + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + prompt += f'\n{key}. {item}' + + if len(options): + if use_cot: + prompt += "\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution." + else: + prompt += "\nAnswer with the option's letter from the given choices directly." + + return prompt + + def build_mmvet_prompt(self, line, dataset=None, use_cot=False): + prompt = line['question'] + if use_cot: + prompt += "\nProvide a step-by-step solution to the problem carefully." + return prompt + + def build_math_prompt(self, line, dataset=None, use_cot=False): + prompt = line['question'] + if use_cot: + prompt += "\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution." 
+ return prompt + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert isinstance(dataset, str) + tgt_path = self.dump_image(line, dataset) + + use_cot = any(dataset.startswith(prefix) for prefix in self.use_cot) + + if dataset == 'MMVet': + prompt = self.build_mmvet_prompt(line, dataset, use_cot) + elif any(dataset.startswith(prefix) for prefix in ('MathVista', 'MathVerse', 'MathVision')): + prompt = self.build_math_prompt(line, dataset, use_cot) + elif DATASET_TYPE(dataset) == 'Y/N': + prompt = self.build_yorn_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset, use_cot) + else: + raise RuntimeError(f'Invalid dataset type: {DATASET_TYPE(dataset)}') + + message = [dict(type='image', value=s) for s in tgt_path] + [dict(type='text', value=prompt)] + + # interleave dataset + if dataset.startswith('MMMU_'): + from ... import MMMUDataset + message = MMMUDataset.split_MMMU(message) + + return message + + def generate_inner(self, message, dataset=None): + def _extract_answer(text): + answer_index = text.lower().find('the answer is') + if answer_index != -1: + answer_index += len('the answer is') + answer = text[answer_index:].lstrip(':').strip() + else: + answer = text + return answer + + # DynaMath + if dataset == 'DynaMath': + message[-1][ + 'value'] += "\nProvide a step-by-step solution to the problem, and conclude with 'the answer is' followed by the final solution." 
+ + prompt, input_ids, attention_mask, pixel_values, grid_thws = self.prepare_inputs(message, dataset) + output_ids = self.model.generate( + input_ids, + pixel_values=pixel_values, + grid_thws=grid_thws, + attention_mask=attention_mask, + **self.gen_kwargs + ) + response = self.text_tokenizer.decode(output_ids[0], skip_special_tokens=True) + + print('\n========================************========================') + print(f'prompt: {prompt}<<<\n') + print(f'output: {response}\n') + + think_end = response.rfind('') + if think_end != -1: + think_end += len('') + response = response[think_end:].strip() + print(f'extract answer: {response}\n') + + if "conclude with 'the answer is' followed by the final solution." in prompt: + response = _extract_answer(response) + print(f'extract answer: {response}\n') + + print('------------------------------------------------------------\n', flush=True) + + return response + + def prepare_inputs(self, message, dataset=None): + # build query + images = [x['value'] for x in message if x['type'] == 'image'] + texts = [x['value'] for x in message if x['type'] == 'text'] + # print(f"=============={DATASET_MODALITY(dataset)}============") + if DATASET_MODALITY(dataset) == 'VIDEO': # video inputs + chunks = [self.image_placeholder for x in message if x['type'] != 'text'] + chunks += [x['value'].strip() for x in message if x['type'] == 'text' and x['value'] != ''] + query = '\n'.join(chunks) + # print(query, chunks) + elif len(images) == 0: # text-only inputs + query = '\n'.join(texts) + elif len(images) == 1 and len(texts) == 1: # single-image inputs + query = self.image_placeholder + '\n' + texts[0] + else: # interleaved inputs + chunks = [x['value'].strip() if x['type'] == 'text' else self.image_placeholder for x in message] + query = '\n'.join(chunks) + + # preprocess inputs + min_pixels = self.min_pixels + max_pixels = self.max_pixels + enable_thinking = os.getenv("OvisThink") == 'True' + prompt, input_ids, pixel_values, grid_thws = 
self.model.preprocess_inputs( + query, [Image.open(image) for image in images], + frame_selector=self.frame_selector, + enable_thinking=enable_thinking, + min_pixels=min_pixels, + max_pixels=max_pixels, # 2000*2000, + ) + + attention_mask = torch.ne(input_ids, self.text_tokenizer.pad_token_id) + input_ids = input_ids.unsqueeze(0).to(device=self.device) + attention_mask = attention_mask.unsqueeze(0).to(device=self.device) + pixel_values = torch.cat([ + pixel_values.to(device=self.device, dtype=self.dtype) if pixel_values is not None else None + ], dim=0) + grid_thws = torch.cat([ + grid_thws.to(device=self.device) if grid_thws is not None else None + ], dim=0) + + return prompt, input_ids, attention_mask, pixel_values, grid_thws diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/thyme/model.py b/VLMEvalKit-sudoku/vlmeval/vlm/thyme/model.py new file mode 100644 index 0000000000000000000000000000000000000000..742068c9ae0cacbf76ceb0686dbc9238822eb262 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/vlm/thyme/model.py @@ -0,0 +1,618 @@ +from __future__ import annotations + +import copy +import logging +import os +import re +import warnings + +import torch +from transformers.cache_utils import DynamicCache + +from ..base import BaseModel +from .prompt import ThymePromptMixin +from .sandbox import execute_code_in_sandbox +from .utils import ( + REASONING_SYS_PROMPT, + SIMPLE_SYS_PROMPT, + SPECIAL_STRING_LIST, + generate_prompt_final_qa, + generate_prompt_simple_qa, +) + + +def ensure_image_url(image: str) -> str: + prefixes = ["http://", "https://", "file://", "data:image;"] + if any(image.startswith(prefix) for prefix in prefixes): + return image + if os.path.exists(image): + return "file://" + image + raise ValueError(f"Invalid image: {image}") + + +def ensure_video_url(video: str) -> str: + prefixes = ["http://", "https://", "file://", "data:video;"] + if any(video.startswith(prefix) for prefix in prefixes): + return video + if os.path.exists(video): + return "file://" + video + 
raise ValueError(f"Invalid video: {video}") + + +class Thyme(ThymePromptMixin, BaseModel): + INSTALL_REQ = False + INTERLEAVE = True + VIDEO_LLM = True + + def __init__( + self, + model_path: str, + min_pixels: int | None = None, + max_pixels: int | None = None, + max_new_tokens=2048, + top_p=0.001, + top_k=1, + temperature=0.01, + repetition_penalty=1.0, + # pandayin: rounds of intermediate steps before reaching final answer. + max_iterations=5, + # pandayin: max retry before reaching a valid answer. + max_retry=5, + use_custom_prompt: bool = True, + system_prompt: str | None = "You are a helpful assistant.", + post_process: bool = True, + # if True, will try to only extract stuff wrapped in & + # . + verbose: bool = True, + auto_cleanup: bool = True, + # Clean up intermediate images generated by sandbox code. + **kwargs, + ): + from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor + super().__init__(use_custom_prompt=use_custom_prompt) + self.min_pixels = min_pixels + self.max_pixels = max_pixels + self.top_p = top_p + self.top_k = top_k + self.temperature = temperature + self.system_prompt = system_prompt + self.max_iterations = max_iterations + self.max_retry = max_retry + self.verbose = verbose + self.post_process = post_process + self.auto_cleanup = auto_cleanup + self.fps = 2.0 + self.nframe = 64 + self.FRAME_FACTOR = 2 + assert model_path is not None + self.model_path = model_path + self.processor = AutoProcessor.from_pretrained(model_path) + + self.generate_kwargs = dict( + max_new_tokens=max_new_tokens, + top_p=top_p, + top_k=top_k, + temperature=temperature, + repetition_penalty=repetition_penalty, + stop_strings=SPECIAL_STRING_LIST, + eos_token_id=self.processor.tokenizer.eos_token_id, + tokenizer=self.processor.tokenizer, + ) + + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + model_path, + torch_dtype="auto", + device_map="auto", + attn_implementation="sdpa", + ) + self.model.eval() + + torch.cuda.empty_cache() 
+ + def _extract_image_path(self, contents: list[dict[str, str]]): + user_image_path = "" + content_history = copy.deepcopy(contents) + for rou in content_history: + if rou["type"] != "image": + continue + user_image_path = rou["value"] + break + return user_image_path + + def _prepare_content( + self, inputs: list[dict[str, str]], dataset: str | None = None + ) -> list[dict[str, str]]: + """ + inputs list[dict[str, str]], each dict has keys: ['type', 'value'] + """ + user_image_path = self._extract_image_path(inputs) + content = [] + for s in inputs: + if s["type"] == "image": + item = {"type": "image", "image": ensure_image_url(s["value"])} + if dataset == "OCRBench": + item["min_pixels"] = 10 * 10 * 28 * 28 + warnings.warn( + f"OCRBench dataset uses custom min_pixels={item['min_pixels']}" + ) + if self.max_pixels is not None: + item["max_pixels"] = self.max_pixels + else: + if self.min_pixels is not None: + item["min_pixels"] = self.min_pixels + if self.max_pixels is not None: + item["max_pixels"] = self.max_pixels + elif s["type"] == "video": + item = { + "type": "video", + "video": ensure_video_url(s["value"]), + "min_pixels": self.min_pixels, + "max_pixels": self.max_pixels, + } + if self.fps is not None: + item["fps"] = self.fps + elif self.nframe is not None: + import cv2 + + video = cv2.VideoCapture(s["value"]) + frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + video.release() + if frame_count < self.nframe: + new_frame_count = ( + frame_count // self.FRAME_FACTOR * self.FRAME_FACTOR) + print(f"use {new_frame_count} for {s['value']}") + item["nframes"] = new_frame_count + else: + item["nframes"] = self.nframe + elif s["type"] == "text": + item = { + "type": "text", + "text": generate_prompt_final_qa( + s["value"], + user_image_path), + } + else: + raise ValueError(f"Invalid message type: {s['type']}, {s}") + content.append(item) + return content + + def _prepare_content_simple( + self, inputs: list[dict[str, str]], dataset: str | None = None + ) -> 
list[dict[str, str]]: + """ + inputs list[dict[str, str]], each dict has keys: ['type', 'value'] + """ + content = [] + for s in inputs: + if s["type"] == "image": + item = {"type": "image", "image": ensure_image_url(s["value"])} + if dataset == "OCRBench": + item["min_pixels"] = 10 * 10 * 28 * 28 + warnings.warn( + f"OCRBench dataset uses custom min_pixels={item['min_pixels']}" + ) + if self.max_pixels is not None: + item["max_pixels"] = self.max_pixels + else: + if self.min_pixels is not None: + item["min_pixels"] = self.min_pixels + if self.max_pixels is not None: + item["max_pixels"] = self.max_pixels + elif s["type"] == "video": + item = { + "type": "video", + "video": ensure_video_url(s["value"]), + "min_pixels": self.min_pixels, + "max_pixels": self.max_pixels, + } + if self.fps is not None: + item["fps"] = self.fps + elif self.nframe is not None: + import cv2 + + video = cv2.VideoCapture(s["value"]) + frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + video.release() + if frame_count < self.nframe: + new_frame_count = ( + frame_count // self.FRAME_FACTOR * self.FRAME_FACTOR) + print(f"use {new_frame_count} for {s['value']}") + item["nframes"] = new_frame_count + else: + item["nframes"] = self.nframe + elif s["type"] == "text": + item = { + "type": "text", + "text": generate_prompt_simple_qa( + s["value"])} + else: + raise ValueError(f"Invalid message type: {s['type']}, {s}") + content.append(item) + return content + + def _extract_box_answer(self, response): + resp = response.split("\\boxed{")[-1] + lt = len(resp) + counter, end = 1, None + for i in range(lt): + if resp[i] == "{": + counter += 1 + elif resp[i] == "}": + counter -= 1 + if counter == 0: + end = i + break + elif i == lt - 1: + end = lt + break + if end is not None: + response = resp[:end] + return response + + def _remove_unpickable_values(self, dictionary): + import pickle + + def is_pickable(obj): + try: + pickle.dumps(obj) + return True + except (pickle.PicklingError, TypeError, 
AttributeError): + return False + + keys_to_remove = [] + for key, value in dictionary.items(): + if isinstance(value, dict): + self._remove_unpickable_values(value) + elif not is_pickable(value): + keys_to_remove.append(key) + for key in keys_to_remove: + del dictionary[key] + return dictionary + + def generate_inner_transformers( + self, + message, + dataset=None, + temp_output_dir=None): + try: + from qwen_vl_utils import process_vision_info + except Exception as err: + logging.critical( + "qwen_vl_utils not found, please install it via 'pip install qwen-vl-utils'" + ) # noqa: E501 + raise err + + user_image_path = self._extract_image_path(message) + + messages = [] + messages.append({"role": "system", "content": REASONING_SYS_PROMPT}) + messages.append( + {"role": "user", "content": self._prepare_content(message, dataset=dataset)} + ) + + if self.verbose: + print(f"\033[31m{messages}\033[0m") + + # ------- outer loop. retry multiple times if fail to reach a valid + retry_generations = self.max_retry + has_valid_answer = False + while (retry_generations > 0) and (not has_valid_answer): + # pandayin: main logic/ work flow for generation. + # The gist is to pause at special tokens ( & ) and + # maybe perform code execution. + conversation_history = copy.deepcopy(messages) + + # For each generation, we initialize a KV-Cache to speed up + # inference. + kv_cache = DynamicCache() + # Maintain a dictionary to save context (local & global vars.) for + # code execution. + previous_execution_context = {} + if self.verbose: + print( + f"\033[32m\n--- Generation {self.max_retry - retry_generations + 1} ---\033[0m" + ) + + # ------- inner loop. generate multiple steps until reaching a + retry_iterations = self.max_iterations + # We assume each answer round is limited to a few code (usually 1) + # execution. 
+ while retry_iterations > 0: + retry_iterations -= 1 + generated_content = [] + if self.verbose: + print( + f"\033[32m\n--- Iteration {self.max_iterations - retry_iterations} ---\033[0m" + ) + + text = self.processor.apply_chat_template( + [conversation_history], tokenize=False, add_generation_prompt=( + retry_iterations == self.max_iterations - 1), ) + + if retry_iterations != self.max_iterations - 1: + if text[0].endswith("<|im_end|>\n"): + text[0] = text[0][: -len("<|im_end|>\n")] + images, videos = process_vision_info([conversation_history]) + inputs = self.processor( + text=text, + images=images, + videos=videos, + padding=True, + return_tensors="pt", + ) + inputs = inputs.to("cuda") + + # just in case this iteration is invalid, we need to roll back, + # thus making a backup. + last_kv_cache = copy.deepcopy(kv_cache) + # bkup context. roll back when we fail to execute the generated + # code. + last_execution_context = copy.deepcopy( + self._remove_unpickable_values(previous_execution_context) + ) + generated_ids = self.model.generate( + **inputs, **self.generate_kwargs, past_key_values=kv_cache + ) + generated_ids = [ + output_ids[len(input_ids):] + for input_ids, output_ids in zip(inputs.input_ids, generated_ids) + ] + out = self.processor.tokenizer.batch_decode( + generated_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False, + ) + generated_text_segment = out[0] + + # Case 1: directly give answer + if "" in generated_text_segment: + generated_content.append( + {"type": "text", "text": generated_text_segment}, + ) + + # Case 2: reach code generation. + # parse current result. Two cases: reach or reach + # + code_regex = re.compile( + r"\s*(?:```\s*)?(?:python\s*)?([\s\S]*?)\s*(?:```\s*)?", + re.IGNORECASE, + ) + + code_match = code_regex.search(generated_text_segment) + + # execute code and return result. 
+ if code_match: + code_to_execute = code_match.group(1).strip() + if self.verbose: + print( + f"\033[31m--- Found Code Block ---\n" + f"{generated_text_segment}\n" + f",-------------------------\033[0m" + ) + + ( + processed_img_paths, + captured_stdout, + error_msg, + current_execution_context, + ) = execute_code_in_sandbox( + code_to_execute, + user_image_path, + temp_output_dir=temp_output_dir, + previous_execution_context=previous_execution_context, + ) + previous_execution_context = current_execution_context + if not processed_img_paths: + # deemed as unsuccessful iteration. roll back status. + kv_cache = last_kv_cache + previous_execution_context = last_execution_context + print(f"{error_msg}") + continue + + has_valid_images = False + generated_content += [ + {"type": "text", "text": generated_text_segment}, + {"type": "text", "text": ""}, + ] + first_path = processed_img_paths[0] + if os.path.exists(first_path): + # Iterate through each path in the list + for img_path in processed_img_paths: + if os.path.exists(img_path): + # Add text segments only once per sandbox + # output block + if not has_valid_images: + has_valid_images = True + generated_content.append( + {"type": "image", "image": img_path} + ) + else: + generated_content.append( + {"type": "text", "text": first_path}) + + if has_valid_images or not os.path.exists(first_path): + generated_content.append( + {"type": "text", "text": ""} + ) + else: + # pandayin: a failed code execution/generation doesn't + # count as an intermedia step. + print( + "skip this generation due to error and adapt the temperature" + ) + self.generate_kwargs["temperature"] = 1.0 + continue + else: + # wo code. wo , assume repetition generated, + # break. + if "" not in generated_text_segment: + print("wo code. 
wo ") + print(generated_text_segment) + self.generate_kwargs["temperature"] = 1.0 + break + + # Update conversation_history with the latest generated segment + # If the last message was 'user', start a new 'assistant' + # message + if conversation_history[-1]["role"] == "user": + conversation_history.append( + {"role": "assistant", "content": generated_content} + ) + # If the last message was 'assistant', append to its last text + # content item + elif conversation_history[-1]["role"] == "assistant": + conversation_history[-1]["content"] += generated_content + + # --- Check for final answer tag if no code was processed in this segment --- + if "" in generated_text_segment: + has_valid_answer = True + print("\033[32m--- Final answer tag found. ---\033[0m") + break + + # If the model produced an EOS token and no code/answer, it + # might be finished + if generated_ids[0][-1] == self.processor.tokenizer.eos_token_id: + if self.verbose: + print( + "\033[32m--- Model generated EOS and no further actions (code/answer)." + "Assuming completion. ---\033[0m" + ) + break + + # End of a generation. Maybe successfully find a valid answer, or + # start a new generation. + if self.verbose: + if has_valid_answer: + print( + f"\033[32m\n--- End of processing (max iterations: {self.max_iterations}," + f"actual: {self.max_iterations - retry_iterations + 1}) ---\033[0m" + ) + break + else: + print( + f"\033[32m\n --- Fail to find a valid answer. (max retrys: {self.max_retry}," + f"actual: {self.max_retry - retry_generations + 1})---\033[0m" + ) + + retry_generations -= 1 + # pandayin: Adjust/reset generation_kwargs here. So more + # explorations could be done to find a valid answer. + print("Fail to find a valid answer and adapt the temperature") + self.generate_kwargs["temperature"] = 1.0 + + # reset generation hyper-param. + self.generate_kwargs["temperature"] = self.temperature + + # pandayin: If we still fail after max_try generations, try a simple + # prompt. 
+ if not has_valid_answer: + print( + f"\033[32m\n --- Fail to find a valid answer after {self.max_retry} retrys." + f"Falling back to simple prompt.---\033[0m" + ) + del self.generate_kwargs["stop_strings"] + + messages = [] + if self.system_prompt is not None: + messages.append( + {"role": "system", "content": SIMPLE_SYS_PROMPT}) + messages.append( + { + "role": "user", + "content": self._prepare_content_simple(message, dataset=dataset), + } + ) + conversation_history = copy.deepcopy(messages) + text = self.processor.apply_chat_template( + [conversation_history], tokenize=False, add_generation_prompt=True) + + images, videos = process_vision_info([conversation_history]) + inputs = self.processor( + text=text, + images=images, + videos=videos, + padding=True, + return_tensors="pt", + ) + inputs = inputs.to("cuda") + generated_ids = self.model.generate( + **inputs, + **self.generate_kwargs, + ) + generated_ids = [ + output_ids[len(input_ids):] + for input_ids, output_ids in zip(inputs.input_ids, generated_ids) + ] + out = self.processor.tokenizer.batch_decode( + generated_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False, + ) + generated_text_segment = out[0] + + self.generate_kwargs["stop_strings"] = SPECIAL_STRING_LIST + + # to align with the following processing procedure. wrap a + # bracket. 
+ answer_match = re.search( + r"(.*?)", generated_text_segment, re.DOTALL + ) + if not answer_match: + generated_text_segment = ( + "" + generated_text_segment + "" + ) + conversation_history.append( + { + "role": "assistant", + "content": [{"type": "text", "text": generated_text_segment}], + } + ) + + final_assistant_response = "" + for msg in reversed(conversation_history): + if msg["role"] != "assistant": + continue + current_content_str = "" + for item in msg["content"]: + if item["type"] == "text": + current_content_str += item["text"] + # Get the last full response from assistant + final_assistant_response = current_content_str + break + + if self.post_process: + print( + f"\033[31m--- Final response ---\n{final_assistant_response}\n-------------------------\033[0m" + ) + # Extract content within tags from the final assistant + # response + answer_match = re.search( + r"(.*?)", final_assistant_response, re.DOTALL + ) + if answer_match: + final_answer = answer_match.group(1).strip() + else: + final_answer = "No answer tag found in the final output." + + # Sometimes the answer is still wrapped in \boxed{}, keeping the behaviour of Qwen2.5-VL. + # We extract the answer within this. 
+ match = re.search(r"\\boxed\{(.*?)\}", final_answer) + if match: + final_answer = self._extract_box_answer(final_answer) + + if self.verbose: + print(f"\033[32m{final_answer}\033[0m") + return final_answer + else: + return final_assistant_response + + def generate_inner(self, message, dataset=None): + if self.auto_cleanup: + import tempfile + + with tempfile.TemporaryDirectory() as temp_dir: + return self.generate_inner_transformers( + message, dataset=dataset, temp_output_dir=temp_dir + ) + else: + return self.generate_inner_transformers(message, dataset=dataset) diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/xcomposer/xcomposer2.py b/VLMEvalKit-sudoku/vlmeval/vlm/xcomposer/xcomposer2.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1f6b9feb2bd4aaecca1fcb1c36af83af6a6fd8 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/vlm/xcomposer/xcomposer2.py @@ -0,0 +1,213 @@ +import torch +import torchvision +from transformers import AutoModel, AutoTokenizer +from PIL import Image +from ..base import BaseModel +from ...smp import * +from ...dataset import DATASET_TYPE +import re +pattern = re.compile(r'[A-Z]') + + +def __padding__(image): + width, height = image.size + tar = max(width, height) + top_padding = int((tar - height) / 2) + bottom_padding = tar - height - top_padding + left_padding = int((tar - width) / 2) + right_padding = tar - width - left_padding + image = torchvision.transforms.functional.pad(image, [left_padding, top_padding, right_padding, bottom_padding]) + return image + + +meta_instruction = """ +You are an AI assistant whose name is InternLM-XComposer (浦语·灵笔). +- InternLM-XComposer (浦语·灵笔) is a multi-modality conversational language model that is developed by +Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless. +- InternLM-XComposer (浦语·灵笔) can understand and communicate fluently in the language +chosen by the user such as English and 中文. 
+- InternLM-XComposer (浦语·灵笔) is capable of comprehending and articulating responses effectively +based on the provided image. +""" + + +def model_gen(model, text, images, need_bos=True, padding=False, beams=3, max_token=500): + pt1 = 0 + embeds = [] + im_mask = [] + images = [images] + images_loc = [0] + for i, pts in enumerate(images_loc + [len(text)]): + subtext = text[pt1:pts] + if need_bos or len(subtext) > 0: + text_embeds = model.encode_text(subtext, add_special_tokens=need_bos) + embeds.append(text_embeds) + im_mask.append(torch.zeros(text_embeds.shape[:2]).cuda()) + need_bos = False + if i < len(images): + try: + image = Image.open(images[i]).convert('RGB') + except: + image = images[i].convert('RGB') + if padding: + image = __padding__(image) + image = model.vis_processor(image).unsqueeze(0).cuda() + image_embeds = model.encode_img(image) + embeds.append(image_embeds) + im_mask.append(torch.ones(image_embeds.shape[:2]).cuda()) + pt1 = pts + embeds = torch.cat(embeds, dim=1) + im_mask = torch.cat(im_mask, dim=1) + im_mask = im_mask.bool() + + outputs = model.generate( + inputs_embeds=embeds, + im_mask=im_mask, + temperature=1.0, + max_new_tokens=max_token, + num_beams=beams, + do_sample=False, + repetition_penalty=1.0) + + output_token = outputs[0] + if output_token[0] == 0 or output_token[0] == 1: + output_token = output_token[1:] + output_text = model.tokenizer.decode(output_token, add_special_tokens=False) + output_text = output_text.split('[UNUSED_TOKEN_145]')[0].strip() + return output_text + + +class XComposer2(BaseModel): + + INSTALL_REQ = False + INTERLEAVE = False + + def __init__(self, model_path='internlm/internlm-xcomposer2-vl-7b', **kwargs): + assert model_path is not None + self.model_path = model_path + + model = AutoModel.from_pretrained(self.model_path, device_map='cpu', trust_remote_code=True).cuda().eval() + model.half() + tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True) + model.tokenizer = tokenizer + 
self.model = model + self.device = self.model.model.tok_embeddings.weight.device + + def generate_mme(self, image_path, text): + text = text.split('Please answer')[0].strip() + text = f'{text} Answer this question briefly' + text = f'[UNUSED_TOKEN_146]user\n{text}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n' + + return model_gen(self.model, text, image_path, need_bos=True, padding=True, beams=5) + + def generate_multichoice(self, image_path, text, dataset): + out = model_gen(self.model, text, image_path, need_bos=True, padding=False, beams=5, max_token=5) + if 'mmmu' in dataset.lower(): + return out + res = pattern.findall(out) + if len(res) == 0: + print('Error:', out) + res = 'Z' + return res[0] + + def generate_vqa(self, image_path, text): + out = model_gen(self.model, text, image_path, need_bos=True) + return out + + def generate_vanilla(self, image_path, text): + text = ( + '[UNUSED_TOKEN_146]system\n{}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]user\n{}' + 'Answer this question in detail.[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n' + ).format(meta_instruction, text) + out = model_gen(self.model, text, image_path, need_bos=True, max_token=500) + return out + + def generate_brief(self, image_path, text): + text = ( + '[UNUSED_TOKEN_146]user\nAnswer the question using a single word or phrase.{}' + '[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n' + ).format(text) + out = model_gen(self.model, text, image_path, need_bos=True, max_token=10) + return out + + def generate_driectly(self, image_path, text): + text = '[UNUSED_TOKEN_146]user\n{}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n'.format(text) + out = model_gen(self.model, text, image_path, need_bos=True, max_token=500) + return out + + def generate_inner(self, message, dataset=None): + prompt, image_path = self.message_to_promptimg(message, dataset=dataset) + with torch.cuda.amp.autocast(): + if dataset is None: + return self.generate_vanilla(image_path, prompt) + assert isinstance(dataset, str) + 
if dataset == 'MME': + return self.generate_mme(image_path, prompt) + + elif listinstr(['hallu'], dataset.lower()): + return self.generate_brief(image_path, prompt) + + elif listinstr(['llava'], dataset.lower()): + return self.generate_vanilla(image_path, prompt) + + elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ': + return self.generate_multichoice(image_path, prompt, dataset) + + elif dataset is not None and DATASET_TYPE(dataset) == 'VQA': + return self.generate_vqa(image_path, prompt) + + else: + return self.generate_vanilla(image_path, prompt) + + def use_custom_prompt(self, dataset): + assert dataset is not None + if DATASET_TYPE(dataset) == 'MCQ' or DATASET_TYPE(dataset) == 'VQA': + return True + return False + + def build_mcqa(self, line): + question = line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + img_prompt = '[UNUSED_TOKEN_146]user\n' + if len(options): + options_prompt = '' + for key, item in options.items(): + options_prompt += f'{key}. 
{item} ' + options_prompt = options_prompt.strip() + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + + context = 'N/A' if hint is None else hint + mid_prompt = 'Question: ' + question + '\nContext: ' + context + '\nOptions: ' + options_prompt + ans_prompt = '[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\nThe answer is' + prompt = img_prompt + mid_prompt + ans_prompt + else: + mid_prompt = f'Answer the question using a single word or phrase.{question}' + ans_prompt = '[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n' + prompt = img_prompt + mid_prompt + ans_prompt + + return prompt + + def build_prompt(self, line, dataset=None): + assert dataset is None or isinstance(dataset, str) + assert self.use_custom_prompt(dataset) + tgt_path = self.dump_image(line, dataset) + + if DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_mcqa(line) + elif DATASET_TYPE(dataset) == 'VQA': + if 'mathvista' in dataset.lower(): + q = line['question'] + prompt = f'[UNUSED_TOKEN_146]user\n{q}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n' + else: + q = line['question'] + prompt = ( + f'[UNUSED_TOKEN_146]user\nAnswer the question using a single word or phrase.{q}' + '[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n' + ) + message = [dict(type='text', value=prompt)] + message.extend([dict(type='image', value=s) for s in tgt_path]) + return message