|
|
import os |
|
|
import csv |
|
|
import json |
|
|
import argparse |
|
|
import pandas as pd |
|
|
|
|
|
|
|
|
# Command-line interface: which held-out domain to build, and where the
# split CSVs live / where the output JSON should be written.
parser = argparse.ArgumentParser(description='Codes for splitting the train/validation/test data for cross-domain generalization')

# (flags, keyword arguments) specs for every CLI option, registered in a loop.
_CLI_SPECS = (
    (('-domain_name', '-dname'),
     dict(type=str, default="Origami",
          choices=['Origami', 'Shuffle_Cards', 'Tangram', 'Tying_Knots'])),
    (('-split_path', '-spath'),
     dict(type=str, default="/media/alexa/WORKSPACE/Shijia-stage-two/new_struggle_dataset/splits/crossdomain_generalization")),
    (('-save_path', '-save'),
     dict(type=str, default="/media/alexa/WORKSPACE/Shijia-stage-two/new_struggle_dataset/splits/crossdomain_generalization/")),
)
for _flags, _kwargs in _CLI_SPECS:
    parser.add_argument(*_flags, **_kwargs)

args = parser.parse_args()
|
|
|
|
|
|
|
|
def df_rows_to_dict(df, sebset, activity, json_data):
    """Convert annotation rows of ``df`` into ActivityNet-style JSON entries.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain the columns ``video_name``, ``duration``, ``fps``,
        ``struggle`` and ``struggle(frames)``; the last two hold
        JSON-encoded lists of ``[start, end]`` segments (seconds / frames).
    sebset : str
        Subset tag written into each entry (e.g. ``'train'``,
        ``'validation'``). Name kept as-is (sic, typo for "subset") to
        preserve the public signature for positional and keyword callers.
    activity : str
        Activity/domain name; prefixed to the video name so keys stay
        unique across domains.
    json_data : dict
        Accumulator with a ``"database"`` mapping; mutated in place.

    Returns
    -------
    dict
        The same ``json_data`` object, returned for chaining.
    """
    for _, row in df.iterrows():
        video_name = activity + '-' + row['video_name']

        # Segments are stored as JSON strings in the CSV; decode into
        # locals instead of mutating the row (iterrows() yields copies,
        # so the original in-place assignment never reached the frame).
        segments = json.loads(row['struggle'])
        segments_frames = json.loads(row['struggle(frames)'])

        # zip() pairs each time segment with its frame segment and is a
        # no-op on empty lists, so the old `if len(...) > 0` guard and
        # the manual index into the parallel list are unnecessary.
        annotations = [
            {
                "label": "Struggle",
                "segment": segment,
                "segment(frames)": segment_frames,
                "label_id": 1,
            }
            for segment, segment_frames in zip(segments, segments_frames)
        ]

        json_data["database"][video_name] = {
            "subset": sebset,
            "duration": row['duration'],
            "fps": row['fps'],
            "annotations": annotations,
        }

    return json_data
|
|
|
|
|
|
|
|
# ---- Build the cross-domain generalization annotation JSON -----------------
print(f"Currently preparing activity name: {args.domain_name}")

root_path = args.split_path
# All output goes into a per-domain sub-directory of the save path.
args.save_path = os.path.join(args.save_path, args.domain_name)
# exist_ok avoids the exists()/makedirs() TOCTOU race of the old check.
os.makedirs(args.save_path, exist_ok=True)

domains_list = ['Origami', 'Shuffle_Cards', 'Tangram', 'Tying_Knots']
json_data = {
    "version": "crossdomain_generalization",
    "database": {}
}

for domain_name in domains_list:
    if domain_name == args.domain_name:
        # Held-out domain: each sub-activity contributes its own
        # unseen-test CSV; train/val of this domain are NOT used.
        files_ls = os.listdir(os.path.join(root_path, domain_name))
        # endswith() is robust where the old split('.')[1] check was not:
        # it neither raises IndexError on dot-less filenames nor rejects
        # names containing more than one dot.
        csv_list = [f for f in files_ls if f.endswith('.csv')]
        # Two of the CSVs are the train/val splits; the remainder are the
        # per-sub-activity unseen-test files.
        num_sub_activities = len(csv_list) - 2

        for i in range(num_sub_activities):
            csv_file_name = domain_name + '_' + "subactivity{:02d}".format(i + 1) + '_' + 'unseentest.csv'
            df = pd.read_csv(os.path.join(root_path, domain_name, csv_file_name))
            json_data = df_rows_to_dict(df, 'test_subactivity{:02d}'.format(i + 1), domain_name, json_data)
    else:
        # Source domains contribute their train and validation splits.
        csv_file_name = domain_name + '_' + 'train.csv'
        df = pd.read_csv(os.path.join(root_path, domain_name, csv_file_name))
        json_data = df_rows_to_dict(df, 'train', domain_name, json_data)

        csv_file_name = domain_name + '_' + 'val.csv'
        df = pd.read_csv(os.path.join(root_path, domain_name, csv_file_name))
        json_data = df_rows_to_dict(df, 'validation', domain_name, json_data)

with open(os.path.join(args.save_path, f"{args.domain_name}_crossdomain.json"), 'w') as fp:
    json.dump(json_data, fp, indent=4)

print("Done!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|