| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
"""Hugging Face `datasets` loading script for the mrCAD dataset of multimodal (text + drawing) computer-aided-design refinement interactions."""
|
|
|
|
| import csv |
| import json |
| import os |
| from pathlib import Path |
|
|
| import datasets |
| import pandas as pd |
| import ast |
| import jsonlines |
| from mrcad import Design |
| from mrcad.editing_actions import EditExecution |
| from .data_conversion_utils import ( |
| get_design_from_record, |
| get_strokes_from_record, |
| get_edit_actions_from_record, |
| ) |
|
|
|
|
| |
| |
# NOTE(review): placeholder citation left over from the dataset-script
# template — TODO: replace with the actual mrCAD citation before release.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""


# TODO: fill in a real description; surfaced via datasets.DatasetInfo.
_DESCRIPTION = """
"""


# TODO: dataset homepage URL (currently unset).
_HOMEPAGE = ""


# TODO: license identifier (currently unset).
_LICENSE = ""


# Archive(s) handed to dl_manager.download_and_extract. A relative path, so
# the zip is expected to sit alongside this script.
_URLS = ["mrcad_data_feb_25.zip"]


# A trial counts as "verified" when its final-round distance to the target
# design is strictly below this threshold (see _split_generators).
DISTANCE_THRESHOLD = 0.2


# Precision passed to Design.round when normalizing geometry records —
# presumably decimal places; confirm against mrcad.Design.round.
MRCAD_PRECISION = 3
|
|
|
|
class mrCADDataset(datasets.GeneratorBasedBuilder):
    """Dataset of interactions for multimodal refinement of computer-aided designs.

    Each example is one trial: a target design plus a sequence of rounds, where
    every round pairs an instruction (free text and/or a drawing) with the
    design the participant produced in response, and the edit actions that
    transform the previous round's design into the new one.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig("full", description="Full dataset"),
        datasets.BuilderConfig(
            "drawing_only", description="Full dataset with text instructions removed"
        ),
        datasets.BuilderConfig(
            "text_only", description="Full dataset with drawing instructions removed"
        ),
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        """Declare the example schema shared by all configurations."""
        instruction_features = {
            "text": datasets.Value("string"),
            "drawing": datasets.Features(
                {
                    # A drawing is a list of splines, each a list of 2D points.
                    "splines": datasets.Sequence(
                        datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    )
                },
            ),
        }

        design_features = datasets.Features(
            {
                "curves": datasets.LargeList(
                    feature=datasets.Features(
                        {
                            "type": datasets.Value("string"),
                            "control_points": datasets.Sequence(
                                datasets.Sequence(datasets.Value("float32"))
                            ),
                        }
                    )
                )
            }
        )

        features = datasets.Features(
            {
                "trial_id": datasets.Value("string"),
                "target_id": datasets.Value("string"),
                "target": design_features,
                "dyad_id": datasets.Value("string"),
                "trial_num": datasets.Value("int32"),
                "rounds": datasets.LargeList(
                    datasets.Features(
                        {
                            "round_num": datasets.Value("int32"),
                            "context": design_features,
                            "instruction": instruction_features,
                            "execution": {
                                "design": design_features,
                            },
                            "edit_execution": datasets.Features(
                                {
                                    # Per-edit fields are a union over all edit
                                    # types; unused fields are null per edit.
                                    "edits": datasets.LargeList(
                                        datasets.Features(
                                            {
                                                "edit_type": datasets.Value("string"),
                                                "point": datasets.Sequence(
                                                    datasets.Value("float32")
                                                ),
                                                "new_point": datasets.Sequence(
                                                    datasets.Value("float32")
                                                ),
                                                "control_points": datasets.Sequence(
                                                    datasets.Sequence(
                                                        datasets.Value("float32")
                                                    )
                                                ),
                                                "type": datasets.Value("string"),
                                                "offset": datasets.Sequence(
                                                    datasets.Value("float32")
                                                ),
                                            }
                                        )
                                    ),
                                    "design": design_features,
                                }
                            ),
                        }
                    )
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    @staticmethod
    def _parse_context(context):
        """Parse a serialized context geometry into a rounded Design.

        The sentinel string "[]" marks an empty canvas (first round) and maps
        to an empty Design rather than going through the record parser.
        """
        if context == "[]":
            return Design(curves=[])
        return get_design_from_record(ast.literal_eval(context)).round(MRCAD_PRECISION)

    def _prepare_round(
        self, round_num, text, strokes, context, execution, actions, prev_actions
    ):
        """Build the example dict for a single interaction round.

        Each serialized CSV column (a Python-literal repr string) is parsed
        exactly once here; the original code re-parsed `context` three times
        and `execution` twice per round.

        NOTE(review): assumes get_edit_actions_from_record and
        EditExecution.execute do not mutate the Design objects they receive —
        TODO confirm against the mrcad package.
        """
        context_design = self._parse_context(context)
        execution_design = get_design_from_record(ast.literal_eval(execution)).round(
            MRCAD_PRECISION
        )
        # The actions new to this round are the suffix beyond what had already
        # been executed in previous rounds.
        new_actions = ast.literal_eval(actions)[len(ast.literal_eval(prev_actions)) :]
        edit_actions = get_edit_actions_from_record(
            new_actions, context_design, execution_design
        )
        return {
            "round_num": round_num,
            "context": context_design.model_dump(mode="json"),
            "instruction": {
                # Missing text comes through pandas as NaN; normalize to "".
                "text": text if isinstance(text, str) else "",
                # BUG FIX: missing strokes are NaN, which is *truthy*, so the
                # old `if strokes` guard let NaN through to ast.literal_eval.
                # Guard on isinstance (as `text` already does) so only real
                # record strings are parsed.
                "drawing": (
                    {"splines": get_strokes_from_record(ast.literal_eval(strokes))}
                    if isinstance(strokes, str) and strokes
                    else None
                ),
            },
            "execution": {
                "design": execution_design.model_dump(mode="json"),
            },
            "edit_execution": EditExecution.execute(
                context_design, edit_actions
            ).model_dump(mode="json"),
        }

    def prepare_trial(self, trial):
        """Convert one consolidated trial row (as a dict of lists) into the
        example schema declared in _info.

        The per-round columns were aggregated into round-ordered lists by
        _split_generators, so they can be zipped in lockstep here.
        """
        return {
            "trial_id": trial["trialId"],
            "target_id": trial["targetId"],
            "target": ast.literal_eval(trial["target"])["design"],
            "dyad_id": trial["dyadId"],
            "trial_num": trial["trialNum"],
            "rounds": [
                self._prepare_round(*round_fields)
                for round_fields in zip(
                    trial["roundNum"],
                    trial["text"],
                    trial["strokes"],
                    trial["prevJsGeometries_li"],
                    trial["jsGeometries"],
                    trial["actions"],
                    trial["prevActions"],
                )
            ],
        }

    def _split_generators(self, dl_manager):
        """Download the raw rounds CSV, consolidate it per trial, and assign
        each trial to one of five named splits."""
        rounds_file_path = dl_manager.download_and_extract(_URLS)

        df = pd.read_csv(Path(rounds_file_path[0]) / "df_rounds_all_up_to_feb_25.csv")
        df.rename(columns={"trialId_sp": "trialId"}, inplace=True)

        # Ablation configs blank out one instruction modality up front.
        if self.config.name == "drawing_only":
            df["text"] = df["text"].apply(lambda x: None)
        elif self.config.name == "text_only":
            df["strokes"] = df["strokes"].apply(lambda x: None)

        # One row per trial; per-round columns become lists ordered by round
        # number. Practice trials are dropped.
        consolidated_df = (
            df[~df.practice_sp]
            .sort_values("roundNum")
            .groupby("trialId")
            .agg(
                {
                    "trialId": "first",
                    "text": list,
                    "targetId": "first",
                    "target": "first",
                    "dyadId": "first",
                    "trialNum": "first",
                    "roundNum": list,
                    "prevJsGeometries_li": list,
                    "jsGeometries": list,
                    "strokes": list,
                    "distance": list,
                    "experiment_subset": "first",
                    "actions": list,
                    "prevActions": list,
                }
            )
        )

        # A trial is "verified" when its final design is within
        # DISTANCE_THRESHOLD of the target AND its rounds form an unbroken
        # 1..N sequence (no missing/duplicated rounds).
        consolidated_df["verified"] = consolidated_df.apply(
            lambda x: x.distance[-1] < DISTANCE_THRESHOLD
            and x.roundNum == [i + 1 for i, _ in enumerate(x.roundNum)],
            axis=1,
        )

        subset = consolidated_df.experiment_subset
        verified = consolidated_df.verified

        consolidated_df.loc[(subset == "coverage") & verified, "split"] = (
            "coverage_verified"
        )
        consolidated_df.loc[(subset == "coverage") & ~verified, "split"] = (
            "coverage_unverified"
        )
        consolidated_df.loc[(subset == "eval") & ~verified, "split"] = "eval_unverified"

        # Verified eval trials are "complete" once at least 3 distinct trials
        # exist for the same target, "incomplete" otherwise.
        eval_verified_mask = (subset == "eval") & verified
        trial_counts = (
            consolidated_df[eval_verified_mask].groupby("targetId")["trialId"].nunique()
        )
        completed_targets = trial_counts[trial_counts >= 3].index.tolist()

        is_complete = consolidated_df["targetId"].isin(completed_targets)
        consolidated_df.loc[eval_verified_mask & is_complete, "split"] = (
            "eval_verified_complete"
        )
        consolidated_df.loc[eval_verified_mask & ~is_complete, "split"] = (
            "eval_verified_incomplete"
        )

        # Stash the consolidated table on the builder: gen_kwargs carry only
        # the split name and _generate_examples reads self.trials.
        self.trials = consolidated_df

        return [
            datasets.SplitGenerator(
                name=datasets.Split(split_name),
                gen_kwargs={"split": split_name},
            )
            for split_name in (
                "coverage_verified",
                "coverage_unverified",
                "eval_unverified",
                "eval_verified_complete",
                "eval_verified_incomplete",
            )
        ]

    def _generate_examples(self, split):
        """Yield (key, example) pairs for the requested split, keyed by trial id."""
        for row in self.trials[self.trials.split == split].itertuples():
            yield row.trialId, self.prepare_trial(row._asdict())
|
|