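"""Preprocess paired meme images into JSONL conversation datasets for reward-model training.

Each raw record pairs an original meme with a corrupted variant (text replaced,
object added, boring caption, etc.). The builders below turn those pairs into
two-image comparison prompts whose label (0 or 1) gives the position of the
original image in the pair.
"""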
import json
import random
import jsonlines
import os
def load_data_jsonl(data_path):
    data = []
    with open(data_path, "r", encoding="utf8") as f:
        for item in jsonlines.Reader(f):
            data.append(item)
    return data
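# load_data_jsonl returns one dict per line of the file; jsonlines.Reader
# iterates the file as parsed JSON objects. E.g. (hypothetical path):
#   pairs = load_data_jsonl('pairs.jsonl')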
def load_data(data_path):
    with open(data_path, 'r') as f:
        data = json.load(f)
    return data
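# Raw pair records consumed by the build_dataset* functions below are expected
# to look like this (a sketch inferred from the field accesses in this file):
#   {"image_list": [path_a, path_b],
#    "label": 0 or 1,               # index of the original image in image_list
#    "text_lengths": [len_a, len_b]}  # overlaid-text lengths, used by the cross builders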
def ensure_dir_exists(path):
    """Create the parent directory of `path` if it doesn't exist."""
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
        print(f"Created directory: {directory}")
def build_dataset(data_list, path):
    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
        PROMPT = f.read()
    dict_list = []
    for idx, d in enumerate(data_list):
        data_json = {'id': idx,
                     'image': d["image_list"],
                     'conversations': [
                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image: <image>'},
                         {'from': 'gpt', 'value': d["label"]}
                     ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)
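# build_dataset writes one JSON object per line, e.g. (abridged, values illustrative):
#   {"id": 0, "image": ["a.png", "b.png"],
#    "conversations": [{"from": "human", "value": "<prompt>\nFirst image: <image>..."},
#                      {"from": "gpt", "value": 0}]}
# where the 'gpt' value is the pair label from the raw record.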
def build_dataset_multihead(data_list, path, mask):
    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
        PROMPT = f.read()
    dict_list = []
    for idx, d in enumerate(data_list):
        data_json = {'id': idx,
                     'image': d["image_list"],
                     'conversations': [
                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image: <image>'},
                         {'from': 'gpt', 'value': [[d["label"]] * 2, mask]}
                     ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)
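# Multihead variant: the 'gpt' value becomes [[label, label], mask], i.e. the
# same label duplicated for the two reward heads (humor and relate, per the
# mask_dict comments in __main__) plus a per-head loss mask (0: masked,
# 1: unmasked), presumably so the trainer can skip a head the corruption type
# doesn't inform.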
def build_dataset_cross(data_list, path, TYPE):
    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
        PROMPT = f.read()
    dict_list = []
    origin_image_list = []
    boring_image_list = []
    origin_text_lengths = []
    boring_text_lengths = []
    # Undo the pos/neg shuffling: label 0 means the origin image is first,
    # label 1 means it is second.
    for idx, d in enumerate(data_list):
        if d["label"] == 0:
            origin_image_list.append(d["image_list"][0])
            boring_image_list.append(d["image_list"][1])
            origin_text_lengths.append(d["text_lengths"][0])
            boring_text_lengths.append(d["text_lengths"][1])
        elif d["label"] == 1:
            origin_image_list.append(d["image_list"][1])
            boring_image_list.append(d["image_list"][0])
            origin_text_lengths.append(d["text_lengths"][1])
            boring_text_lengths.append(d["text_lengths"][0])
        else:
            raise ValueError("Wrong label")
    # for origin, boring in zip(origin_image_list, boring_image_list):
    #     if 'origin' not in origin or TYPE[:-4] not in boring:
    #         raise ValueError("Wrong split")
    print('sorting the boring images')
    # Pair each boring image with its text length and sort once by length (ascending)
    boring_with_lengths = list(zip(boring_image_list, boring_text_lengths))
    boring_with_lengths.sort(key=lambda x: x[1])
    print('generating the pairs')
    for idx, origin in enumerate(origin_image_list):
        original_length = origin_text_lengths[idx]
        # Find the first index where the boring text is longer than the origin's
        longer_idx = 0
        while longer_idx < len(boring_with_lengths) and boring_with_lengths[longer_idx][1] <= original_length:
            longer_idx += 1
        # Length-biased sampling (disabled here; active in build_dataset_cross_multihead):
        # with 70% probability, choose a boring image with longer text if available.
        # if longer_idx < len(boring_with_lengths) and random.random() < 0.7:
        #     boring = random.choice(boring_with_lengths[longer_idx:])[0]
        # elif longer_idx > 0:
        #     boring = random.choice(boring_with_lengths[:longer_idx])[0]
        # else:
        #     boring = random.choice(boring_with_lengths)[0]
        boring = random.choice(boring_with_lengths)[0]
        pos_neg = random.choice(["pos", "neg"])
        if pos_neg == 'pos':
            data_json = {'id': idx,
                         'image': [origin, boring],
                         'conversations': [
                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image: <image>'},
                             {'from': 'gpt', 'value': 0}
                         ]}
        else:
            data_json = {'id': idx,
                         'image': [boring, origin],
                         'conversations': [
                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image: <image>'},
                             {'from': 'gpt', 'value': 1}
                         ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)
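# Cross variant: instead of keeping each origin with its own corrupted
# counterpart, every origin image is re-paired with a randomly drawn boring
# image, then the pair order is re-shuffled and relabeled. The commented block
# above sketches an alternative that biases sampling toward boring images with
# longer overlaid text; it is disabled here but active in
# build_dataset_cross_multihead below.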
def build_dataset_cross_multihead(data_list, path, TYPE, mask):
    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt', 'r') as f:
        PROMPT = f.read()
    dict_list = []
    origin_image_list = []
    boring_image_list = []
    origin_text_lengths = []
    boring_text_lengths = []
    # Undo the pos/neg shuffling: label 0 means the origin image is first,
    # label 1 means it is second.
    for idx, d in enumerate(data_list):
        if d["label"] == 0:
            origin_image_list.append(d["image_list"][0])
            boring_image_list.append(d["image_list"][1])
            origin_text_lengths.append(d["text_lengths"][0])
            boring_text_lengths.append(d["text_lengths"][1])
        elif d["label"] == 1:
            origin_image_list.append(d["image_list"][1])
            boring_image_list.append(d["image_list"][0])
            origin_text_lengths.append(d["text_lengths"][1])
            boring_text_lengths.append(d["text_lengths"][0])
        else:
            raise ValueError("Wrong label")
    # for origin, boring in zip(origin_image_list, boring_image_list):
    #     if 'origin' not in origin or TYPE[:-4] not in boring:
    #         raise ValueError("Wrong split")
    print('sorting the boring images')
    # Pair each boring image with its text length and sort once by length (ascending)
    boring_with_lengths = list(zip(boring_image_list, boring_text_lengths))
    boring_with_lengths.sort(key=lambda x: x[1])
    print('generating the pairs')
    for idx, origin in enumerate(origin_image_list):
        original_length = origin_text_lengths[idx]
        # Find the first index where the boring text is longer than the origin's
        longer_idx = 0
        while longer_idx < len(boring_with_lengths) and boring_with_lengths[longer_idx][1] <= original_length:
            longer_idx += 1
        # With 70% probability, choose a boring image with longer text if available
        if longer_idx < len(boring_with_lengths) and random.random() < 0.7:
            # Sample from the longer-text images
            boring = random.choice(boring_with_lengths[longer_idx:])[0]
        elif longer_idx > 0:
            # Sample from the shorter-text images
            boring = random.choice(boring_with_lengths[:longer_idx])[0]
        else:
            # No longer-text images exist; sample from all
            boring = random.choice(boring_with_lengths)[0]
        pos_neg = random.choice(["pos", "neg"])
        if pos_neg == 'pos':
            data_json = {'id': idx,
                         'image': [origin, boring],
                         'conversations': [
                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image: <image>'},
                             {'from': 'gpt', 'value': [[0] * 2, mask]}
                         ]}
        else:
            data_json = {'id': idx,
                         'image': [boring, origin],
                         'conversations': [
                             {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image: <image>'},
                             {'from': 'gpt', 'value': [[1] * 2, mask]}
                         ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)
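# Same cross re-pairing as build_dataset_cross, but with the length-biased
# sampling enabled (70% chance of drawing a boring image with more overlaid
# text than the origin, when one exists) and multihead labels as in
# build_dataset_multihead.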
def build_json(dataset_path_list, length_list, name_list, json_path):
    dict_list = []
    for dataset_path, length, name in zip(dataset_path_list, length_list, name_list):
        meta = {
            name: {
                "root": "",
                "annotation": dataset_path,
                "data_augment": False,
                "repeat_time": 1,
                "length": length
            }
        }
        dict_list.append(meta)
    with open(json_path, 'w', encoding='utf-8') as file:
        for meta in dict_list:
            json.dump(meta, file)
            file.write('\n')
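# build_json writes one metadata object per dataset, e.g. (values illustrative):
#   {"boringmeme_cross": {"root": "", "annotation": ".../boringmeme_cross_train.jsonl",
#    "data_augment": false, "repeat_time": 1, "length": 1234}}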
def split_train_test(data, train_path, test_path):
    random.shuffle(data)
    selected_items = data[:int(len(data) * 0.9)]
    unselected_items = data[int(len(data) * 0.9):]
    with open(train_path, 'w') as f:
        json.dump(selected_items, f)
    with open(test_path, 'w') as f:
        json.dump(unselected_items, f)
    return selected_items, unselected_items
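# split_train_test performs a 90/10 shuffled split; note that random.shuffle
# mutates the caller's list in place.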
def split_train_test_original(original_dataset):
    # First, load and shuffle the original dataset
    original_data = load_data(original_dataset)
    random.shuffle(original_data)
    # Split the original data 90/10
    train_data_original = original_data[:int(len(original_data) * 0.9)]
    test_data_original = original_data[int(len(original_data) * 0.9):]
    # Extract image IDs from filenames (assuming filenames are like "image_xxx.jpg")
    train_image_ids = []
    for item in train_data_original:
        filename = item["original_image"].split("/")[-1]  # keep just the filename
        train_image_ids.append(filename)
    test_image_ids = []
    for item in test_data_original:
        filename = item["original_image"].split("/")[-1]  # keep just the filename
        test_image_ids.append(filename)
    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_train_ids.jsonl', 'w') as f:
        json.dump(train_image_ids, f)
    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_test_ids.jsonl', 'w') as f:
        json.dump(test_image_ids, f)
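# split_train_test_original fixes the train/test membership once by writing the
# original-image filenames of each side to Eimages_{train,test}_ids.jsonl, so
# that every corruption variant can be assigned to the same split (see the
# commented reference block in __main__ below).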
if __name__ == '__main__':
    NAME_list = ['object_add']  # other options: 'text_replaced', 'lowperformancememe', 'irrelevantmeme', 'boringmeme', 'boring_detailed'
    TYPE_list = ['cross', '']
    mask_dict = {  # per-head loss mask, 0: mask, 1: no mask; first entry: humor head, second: relate head
        'text_replaced': [1, 1],       # text replaced: humor and relate both unmasked
        'lowperformancememe': [1, 0],  # low-performance meme: humor unmasked, relate masked
        'irrelevantmeme': [0, 1],      # irrelevant meme: humor masked, relate unmasked
        'boringmeme': [1, 0]           # boring meme: humor unmasked, relate masked
    }
    for NAME in NAME_list:
        for TYPE in TYPE_list:
            if NAME == 'lowperformancememe':
                dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/{NAME}.jsonl'
            elif NAME == 'text_replaced' or NAME == 'boring_detailed':
                dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/Eimages_{NAME}.json'
            else:
                # dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/{NAME}.json'
                dataset = "/fs-computility/niuyazhe/shared/meme/data/meme/Eimages/Eimages_object_2.jsonl"
            original_dataset = '/fs-computility/niuyazhe/lixueyan/jmj/DIlab/meme/memetrash/processed_dections_Eimage_UPDATED.json'
            train_image_ids = load_data_jsonl('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_train_ids.jsonl')
            test_image_ids = load_data_jsonl('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_test_ids.jsonl')
            # split_train_test_original(original_dataset)
            if TYPE != '':
                dataset_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/Ejson/{NAME}_{TYPE}_train.jsonl'
                dataset_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/Ejson/{NAME}_{TYPE}_test.jsonl'
                json_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/{NAME}_{TYPE}_train.jsonl'
                json_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/{NAME}_{TYPE}_test.jsonl'
            else:
                dataset_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/Ejson/{NAME}_train.jsonl'
                dataset_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/Ejson/{NAME}_test.jsonl'
                json_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/{NAME}_train.jsonl'
                json_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/{NAME}_test.jsonl'
            train_path = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/raw_data/train.json'
            test_path = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/raw_data/test.json'
            ensure_dir_exists(dataset_path_train)
            ensure_dir_exists(dataset_path_test)
            ensure_dir_exists(json_path_train)
            ensure_dir_exists(json_path_test)
            ensure_dir_exists(train_path)
            ensure_dir_exists(test_path)
            # # Raw-pair construction (kept for reference; run once, then reuse train_path/test_path):
            # if NAME == 'object_add':
            #     data = load_data_jsonl(dataset)
            # else:
            #     data = load_data(dataset)
            # # Process the data based on the original split
            # train_data_list = []
            # test_data_list = []
            # for d in data:
            #     pos_neg = random.choice(["pos", "neg"])
            #     # Extract text lengths
            #     original_image_length = 0
            #     new_image_length = 0
            #     # Text length of the new image, from its detections
            #     if "detections" in d:
            #         for detection in d["detections"]:
            #             if "text" in detection:
            #                 new_image_length += len(detection["text"])
            #     # Find the original image in the original dataset to get its text length
            #     original_filename = d["original_image"].split("/")[-1]
            #     for orig_item in load_data(original_dataset):
            #         if orig_item["image_path"].split("/")[-1] == original_filename:
            #             if "detections" in orig_item:
            #                 for detection in orig_item["detections"]:
            #                     if "text" in detection:
            #                         original_image_length += len(detection["text"])
            #             break
            #     # Build the pair record with text lengths
            #     if pos_neg == "pos":
            #         data_dict = {"image_list": [d["original_image"], d["new_image"]],
            #                      "label": 0,
            #                      "text_lengths": [original_image_length, new_image_length]}
            #     else:
            #         data_dict = {"image_list": [d["new_image"], d["original_image"]],
            #                      "label": 1,
            #                      "text_lengths": [new_image_length, original_image_length]}
            #     # Filename from the original image path
            #     filename = d["original_image"].split("/")[-1]
            #     # Normalization needed only for the object-changed variant
            #     filename = filename.replace('(', '').replace(')', '').replace(' ', '')
            #     # Assign to train or test based on the original split
            #     if filename in train_image_ids[0]:
            #         train_data_list.append(data_dict)
            #     else:
            #         test_data_list.append(data_dict)
            # print(len(train_data_list), len(test_data_list))
            # # Save the processed pairs
            # with open(train_path, 'w') as f:
            #     json.dump(train_data_list, f)
            # with open(test_path, 'w') as f:
            #     json.dump(test_data_list, f)
            # exit()
            # Build the conversation datasets and their metadata files
            train_data = load_data(train_path)
            test_data = load_data(test_path)
            if 'meme' in NAME:
                name = NAME[:-4]  # strip the trailing 'meme' from the dataset name
            else:
                name = NAME
            if TYPE == '':
                length_train = build_dataset(train_data, dataset_path_train)
                build_json([dataset_path_train], [length_train], [name], json_path_train)
                length_test = build_dataset(test_data, dataset_path_test)
                build_json([dataset_path_test], [length_test], [name], json_path_test)
            elif TYPE == 'cross':
                length_train = build_dataset_cross(train_data, dataset_path_train, NAME)
                build_json([dataset_path_train], [length_train], [name + '_' + TYPE], json_path_train)
                length_test = build_dataset_cross(test_data, dataset_path_test, NAME)
                build_json([dataset_path_test], [length_test], [name + '_' + TYPE], json_path_test)
            elif TYPE == 'align_multihead':
                length_train = build_dataset_multihead(train_data, dataset_path_train, mask_dict[NAME])
                build_json([dataset_path_train], [length_train], [name], json_path_train)
                length_test = build_dataset_multihead(test_data, dataset_path_test, mask_dict[NAME])
                build_json([dataset_path_test], [length_test], [name], json_path_test)
            elif TYPE == 'cross_multihead':
                length_train = build_dataset_cross_multihead(train_data, dataset_path_train, NAME, mask_dict[NAME])
                build_json([dataset_path_train], [length_train], [name + '_' + TYPE], json_path_train)
                length_test = build_dataset_cross_multihead(test_data, dataset_path_test, NAME, mask_dict[NAME])
                build_json([dataset_path_test], [length_test], [name + '_' + TYPE], json_path_test)
            print(f'Done {NAME} {TYPE}')