code stringlengths 17 6.64M |
|---|
class AVATAR_OT_SetRestPose(bpy.types.Operator):
    # Operator: return the avatar skeleton to its stored reference rest pose.
    bl_idname = 'avt.set_rest_pose'
    bl_label = 'Reset Pose'
    bl_options = {'REGISTER'}

    def execute(self, context):
        """Reset every mapped bone to the reference pose and rewind to frame 1."""
        global mAvt  # module-level avatar state (skeleton, reference, bone list)
        motion_utils.set_rest_pose(mAvt.skel, mAvt.skel_ref, mAvt.list_bones)
        mAvt.frame = 1
        return {'FINISHED'}
|
class AVATAR_OT_LoadBVH(bpy.types.Operator):
    # Operator: open a file browser, then retarget the chosen BVH clip onto
    # the active model using the rig correspondence selected in the panel.
    bl_idname = 'avt.load_bvh'
    bl_label = 'Load BVH'
    bl_description = 'Transfer motion to human model'
    filepath: bpy.props.StringProperty(subtype='FILE_PATH')
    act_x: bpy.props.BoolProperty(name='X')
    act_y: bpy.props.BoolProperty(name='Y')
    act_z: bpy.props.BoolProperty(name='Z')

    def invoke(self, context, event):
        # Show the file browser; Blender calls execute() once a file is picked.
        bpy.context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def execute(self, context):
        """Retarget the selected BVH file onto the active object, if any."""
        global avt_path  # add-on root directory (set at module level)
        scn = context.scene
        obj = context.active_object
        file_path_bvh = self.filepath
        # Bone-name correspondence table for the rig chosen in the UI.
        bone_corresp_file = '%s/motion/rigs/%s.txt' % (avt_path, scn.skel_rig)
        if obj is not None:
            retarget.retarget_addon(bone_corresp_file, file_path_bvh, obj, scn.skel_rig)
        else:
            print('Please, select a model to transfer the bvh action')
        return {'FINISHED'}
|
class AVATAR_PT_MotionPanel(bpy.types.Panel):
    # Sidebar panel ("Avatar" tab, 3D View) holding the motion controls.
    bl_idname = 'AVATAR_PT_MotionPanel'
    bl_label = 'Motion'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'
    # NOTE(review): these Object-level properties are registered as a side
    # effect of executing the class body rather than inside register();
    # works, but confirm the module is imported exactly once.
    bpy.types.Object.bvh_offset = IntProperty(name='Offset', description='Start motion offset', default=0, min=0, max=250)
    bpy.types.Object.bvh_start_origin = BoolProperty(name='Origin', description='Start at origin', default=False)

    def draw(self, context):
        """Draw the rest-pose button, the rig selector, and the BVH loader."""
        layout = self.layout
        obj = context.object  # unused here; kept as-is
        wm = context.window_manager  # unused here; kept as-is
        layout.operator('avt.set_rest_pose', text='Reset pose')
        layout.prop(context.scene, 'skel_rig', text='')
        layout.operator('avt.load_bvh', text='Load BVH')
|
def enum_menu_items():
    """Build EnumProperty items from the rig correspondence files.

    Scans ``<avt_path>/motion/rigs`` for ``.txt`` files and returns a list of
    ``(identifier, name, description, number)`` tuples suitable for
    ``bpy.props.EnumProperty(items=...)``.
    """
    global avt_path  # add-on root directory (set at module level)
    rigs_folder = '%s/motion/rigs' % avt_path
    rigs_names = [f for f in os.listdir(rigs_folder) if f.endswith('.txt')]
    menu_items = []
    # enumerate(start=1) replaces the original hand-rolled counter.
    for i, rig in enumerate(rigs_names, start=1):
        name = rig.split('.')[0]  # identifier = filename up to the first dot
        menu_items.append((name, name, '', i))
    return menu_items
|
def register():
    # Add-on registration: previews, scene properties, then the UI classes.
    gcoll = bpy.utils.previews.new()
    gcoll.images_location = ('%s/dressing/cloth_previews' % avt_path)
    avt_preview_collections['thumbnail_previews'] = gcoll
    # NOTE(review): enum items are computed once at registration time; new
    # preview images or rig files require re-registering the add-on.
    bpy.types.Scene.avt_thumbnails = EnumProperty(items=generate_previews())
    bpy.types.Scene.skel_rig = bpy.props.EnumProperty(items=enum_menu_items())
    from bpy.utils import register_class
    for clas in classes:
        register_class(clas)
|
def unregister():
    # Add-on teardown: classes first, then previews, then scene properties.
    from bpy.utils import unregister_class
    for clas in classes:
        unregister_class(clas)
    for gcoll in avt_preview_collections.values():
        bpy.utils.previews.remove(gcoll)
    avt_preview_collections.clear()
    del bpy.types.Scene.avt_thumbnails
    del bpy.types.Scene.skel_rig
|
def read_eigenbody(filename):
    """Read one float per line from *filename* into a 1-D numpy array.

    Raises ValueError if any line is not parseable as a float.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f_eigen:
        eigenbody = [float(line) for line in f_eigen]
    return np.array(eigenbody)
|
def compose_vertices_eigenmat(eigenmat):
    """Fold a flat eigen-vector into per-vertex rows.

    Consecutive triples (x, y, z) become [x, -z, y] — an axis swap from the
    eigenbody convention into Blender's coordinate frame. Returns an (N, 3)
    numpy array.
    """
    rows = [
        [eigenmat[j], -eigenmat[j + 2], eigenmat[j + 1]]
        for j in range(0, len(eigenmat), 3)
    ]
    return np.array(rows)
|
def get_material_id(name_cloth):
    """Return the material class id for *name_cloth*.

    Looks the name up in the module-level ``clthlst`` list and returns the
    entry at the same position in the parallel ``cloth_class`` list.
    Raises ValueError if the name is unknown.
    """
    return cloth_class[clthlst.index(name_cloth)]
|
def load_cloth(cloth_file, cloth_name):
    # Import an OBJ garment, rename object + mesh to cloth_name, and (when an
    # 'Avatar' armature exists) parent it with automatic weights.
    bpy.ops.import_scene.obj(filepath=cloth_file)
    # The importer leaves the new object selected; rename it and its mesh data.
    bpy.context.selected_objects[0].name = cloth_name
    bpy.context.selected_objects[0].data.name = cloth_name
    b = bpy.data.objects[cloth_name]
    b.select_set(True)
    bpy.context.view_layer.objects.active = b
    bpy.ops.object.mode_set(mode='OBJECT')
    if (bpy.data.objects.get('Avatar') is not None):
        a = bpy.data.objects['Avatar']
        b = bpy.data.objects[cloth_name]
        a.select_set(True)
        b.select_set(True)
        # The armature must be the active object for ARMATURE_AUTO parenting.
        bpy.context.view_layer.objects.active = a
        bpy.ops.object.parent_set(type='ARMATURE_AUTO')
    # Leave a clean selection state behind.
    for obj in bpy.data.objects:
        obj.select_set(False)
|
def read_file_textures(root_path, fold_name):
    """Read the texture manifest for one clothing texture folder.

    ``<root_path>/dressing/textures/<fold_name>/default.txt`` lists one to
    three filenames: color map, then optionally normal map, then optionally
    specular map.

    Returns:
        (tex_col, tex_norm, tex_spec) — full paths, or None for maps that are
        not listed. All three are None (and an error is printed) when the
        manifest has zero or more than three lines.
    """
    tex_dir = '%s/dressing/textures/%s' % (root_path, fold_name)
    # 'with' guarantees the manifest handle is closed (the original leaked
    # it on exceptions).
    with open('%s/default.txt' % tex_dir, 'r') as ftex:
        lines = [line.strip() for line in ftex]
    paths = [None, None, None]  # color, normal, specular
    if 1 <= len(lines) <= 3:
        for i, name in enumerate(lines):
            paths[i] = '%s/%s' % (tex_dir, name)
    else:
        print('Error reading default texture file')
    return tuple(paths)
|
def load_studio(root_path):
    # Build the render "studio": import the floor plane, remove any existing
    # cameras/lights, then create one camera and a three-point sun-light rig.
    s_file = ('%s/dressing/models/studio_plane.obj' % root_path)
    bpy.ops.import_scene.obj(filepath=s_file)
    bpy.context.selected_objects[0].name = 'studio_plane'
    bpy.context.selected_objects[0].data.name = 'studio_plane'
    # Select only cameras and lights, then delete the selection.
    for o in bpy.context.scene.objects:
        if (o.type == 'CAMERA'):
            o.select_set(True)
        elif (o.type == 'LIGHT'):
            o.select_set(True)
        else:
            o.select_set(False)
    bpy.ops.object.delete()
    # Fresh camera facing the stage.
    cam_data = bpy.data.cameras.new('CameraData')
    cam_object = bpy.data.objects.new(name='Camera', object_data=cam_data)
    bpy.context.collection.objects.link(cam_object)
    cam_object.location = (0, (- 66.2), 9.28)
    cam_object.rotation_euler = (math.radians(90), 0, 0)
    # Three-point lighting: fill, back, and key sun lamps.
    fill_data = bpy.data.lights.new(name='FillData', type='SUN')
    fill_data.energy = 1
    fill_object = bpy.data.objects.new(name='fill', object_data=fill_data)
    bpy.context.collection.objects.link(fill_object)
    bpy.context.view_layer.objects.active = fill_object
    fill_object.location = (32.29, (- 25.6), 48.17)
    fill_object.rotation_euler = (math.radians((- 15)), math.radians(30), math.radians((- 14)))
    back_data = bpy.data.lights.new(name='BackData', type='SUN')
    back_data.energy = 1
    back_object = bpy.data.objects.new(name='back', object_data=back_data)
    bpy.context.collection.objects.link(back_object)
    bpy.context.view_layer.objects.active = back_object
    back_object.location = (33.46, 46.93, 41.5)
    back_object.rotation_euler = (math.radians(45), math.radians((- 23)), math.radians(31))
    key_data = bpy.data.lights.new(name='KeyData', type='SUN')
    key_data.energy = 1
    key_object = bpy.data.objects.new(name='key', object_data=key_data)
    bpy.context.collection.objects.link(key_object)
    bpy.context.view_layer.objects.active = key_object
    key_object.location = ((- 36.88), (- 30.55), 49.1)
    key_object.rotation_euler = (math.radians(14), math.radians((- 54)), math.radians(11))
    # Force a depsgraph refresh so the new objects are evaluated immediately.
    dg = bpy.context.evaluated_depsgraph_get()
    dg.update()
|
def create_material_generic(matname, index, matid):
    # Build (or reuse) a node-based material named '<matname>_matNN' wired as:
    # color / specular / normal texture nodes -> Principled BSDF -> output.
    for m in bpy.data.materials:
        if ('Default' in m.name):
            bpy.data.materials.remove(m)  # drop importer-created default materials
    mat_name = ('%s_mat%02d' % (matname, index))
    skinMat = (bpy.data.materials.get(mat_name) or bpy.data.materials.new(mat_name))
    skinMat.pass_index = matid  # material index used for render passes / masking
    skinMat.use_nodes = True
    skinMat.node_tree.nodes.clear()
    # Node creation ORDER matters: assign_textures_generic_mat() finds these
    # by their auto-generated names 'Image Texture', 'Image Texture.001',
    # 'Image Texture.002' (color, normal, specular respectively).
    tex_image = skinMat.node_tree.nodes.new(type='ShaderNodeTexImage')
    tex_image.location = (0, 0)
    tex_norm = skinMat.node_tree.nodes.new(type='ShaderNodeTexImage')
    tex_norm.location = (0, (- 600))
    tex_spec = skinMat.node_tree.nodes.new(type='ShaderNodeTexImage')
    tex_spec.location = (0, (- 300))
    norm_map = skinMat.node_tree.nodes.new(type='ShaderNodeNormalMap')
    norm_map.location = (300, (- 600))
    principled = skinMat.node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')
    principled.location = (600, 0)
    output = skinMat.node_tree.nodes.new(type='ShaderNodeOutputMaterial')
    output.location = (1000, 0)
    # Wire the graph: textures feed the Principled BSDF, which feeds the output.
    skinMat.node_tree.links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])
    skinMat.node_tree.links.new(tex_norm.outputs['Color'], norm_map.inputs['Color'])
    skinMat.node_tree.links.new(norm_map.outputs['Normal'], principled.inputs['Normal'])
    skinMat.node_tree.links.new(tex_spec.outputs['Color'], principled.inputs['Specular'])
    skinMat.node_tree.links.new(principled.outputs['BSDF'], output.inputs['Surface'])
    return skinMat
|
def assign_textures_generic_mat(body, cmat, tex_img, tex_norm, tex_spec):
    # Load up to three texture files and plug them into the image nodes of a
    # material created by create_material_generic(). Any of tex_img/tex_norm/
    # tex_spec may be None, in which case that slot is left empty.
    body.select_set(True)
    if (len(body.material_slots) == 0):
        bpy.context.view_layer.objects.active = body
        bpy.ops.object.material_slot_add()
    body.material_slots[0].material = cmat
    img_tex_img = img_tex_norm = img_tex_spec = None
    # Reuse an already-loaded image datablock of the same basename, else load.
    if (tex_img is not None):
        img_name = os.path.basename(tex_img)
        img_tex_img = (bpy.data.images.get(img_name) or bpy.data.images.load(tex_img))
    if (tex_norm is not None):
        img_name = os.path.basename(tex_norm)
        img_tex_norm = (bpy.data.images.get(img_name) or bpy.data.images.load(tex_norm))
    if (tex_spec is not None):
        img_name = os.path.basename(tex_spec)
        img_tex_spec = (bpy.data.images.get(img_name) or bpy.data.images.load(tex_spec))
    matnodes = cmat.node_tree.nodes
    # Nodes are identified by the auto-generated names from the creation order
    # in create_material_generic(): 'Image Texture' = color,
    # '.001' = normal, '.002' = specular.
    for n in matnodes:
        if (n.type == 'NORMAL_MAP'):
            matnodes.active = n
            n.select = True
            n.inputs[0].default_value = 1.0  # normal map strength
        if (n.type == 'TEX_IMAGE'):
            if (n.name == 'Image Texture'):
                if (img_tex_img is not None):
                    matnodes.active = n
                    n.select = True
                    n.image = img_tex_img
            if (n.name == 'Image Texture.001'):
                if (img_tex_norm is not None):
                    matnodes.active = n
                    n.select = True
                    n.image = img_tex_norm
                    # Normal maps must not be color-managed.
                    n.image.colorspace_settings.name = 'Non-Color'
            if (n.name == 'Image Texture.002'):
                if (img_tex_spec is not None):
                    matnodes.active = n
                    n.select = True
                    n.image = img_tex_spec
                    # Specular maps are data, not color.
                    n.image.colorspace_settings.name = 'Non-Color'
    body.select_set(False)
|
def read_text_lines(filename):
    """Parse a bone-correspondence file into a list of [src, dst] pairs.

    Each line is either ``src dst`` or a single token ``src`` (mapped to
    ``'none'``). Blank lines are skipped (the original raised IndexError on
    them). Lines with more than two tokens keep the first token and 'none',
    matching the original behavior.
    """
    list_bones = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as text_file:
        for line in text_file:
            line_split = line.split()
            if not line_split:
                continue  # blank line
            if len(line_split) == 2:
                list_bones.append([line_split[0], line_split[1]])
            else:
                list_bones.append([line_split[0], 'none'])
    return list_bones
|
def find_bone_match(list_bones, bone_name):
    """Return the mapped name for *bone_name* from the correspondence pairs.

    *list_bones* is a list of [source, target] pairs; the first pair whose
    source equals *bone_name* wins. Returns 'none' when nothing matches.
    """
    return next((dst for src, dst in list_bones if src == bone_name), 'none')
|
def matrix_scale(scale_vec):
    """Return a 4x4 mathutils Matrix with *scale_vec* on the diagonal."""
    sx, sy, sz = scale_vec[0], scale_vec[1], scale_vec[2]
    return Matrix((
        (sx, 0, 0, 0),
        (0, sy, 0, 0),
        (0, 0, sz, 0),
        (0, 0, 0, 1),
    ))
|
def matrix_for_bone_from_parent(bone, ao):
    """Rest matrix of *bone* expressed in its parent's rest space.

    Both matrices come from the armature's data bones (rest pose, armature
    space). Raises AttributeError when the bone has no parent, matching the
    original behavior.
    """
    data_bone = ao.data.bones[bone.name]
    parent_rest = data_bone.parent.matrix_local
    return parent_rest.inverted() @ data_bone.matrix_local
|
def matrix_the_hard_way(pose_bone, ao):
    # Recursively rebuild pose_bone's object-space pose matrix from its local
    # loc/rot/scale channels plus the rest matrices, without reading
    # pose_bone.matrix (hence "the hard way").
    if (pose_bone.rotation_mode == 'QUATERNION'):
        mr = pose_bone.rotation_quaternion.to_matrix().to_4x4()
    else:
        mr = pose_bone.rotation_euler.to_matrix().to_4x4()
    # Local basis transform: translation @ rotation @ scale.
    m1 = ((Matrix.Translation(pose_bone.location) @ mr) @ matrix_scale(pose_bone.scale))
    E = ao.data.bones[pose_bone.name].matrix_local  # rest matrix (armature space)
    if (pose_bone.parent is None):
        return (E @ m1)
    else:
        m2 = matrix_the_hard_way(pose_bone.parent, ao)  # parent's pose matrix
        E_p = ao.data.bones[pose_bone.parent.name].matrix_local
        # Express this bone relative to the parent's rest frame, then pose it.
        return (((m2 @ E_p.inverted()) @ E) @ m1)
|
def worldMatrix(ArmatureObject, Bone):
    """Return the world-space matrix of pose bone *Bone* on *ArmatureObject*."""
    _bone = ArmatureObject.pose.bones[Bone]
    _obj = ArmatureObject
    # Blender 2.8+ mathutils matrices compose with '@' ('*' is elementwise);
    # this matches the '@' usage in every other function of this file.
    return (_obj.matrix_world @ _bone.matrix)
|
def pose_to_match(arm, goal, bc):
    """
    pose arm so that its bones line up with the REST pose of goal

    arm  -- armature object to pose
    goal -- armature object whose rest pose is the target
    bc   -- bone-correspondence list as produced by read_text_lines()
    """
    # Pass 1: cache the object-space matrix of every matched goal bone.
    matrix_os = {}
    for bone in arm.data.bones:
        bone_match = find_bone_match(bc, bone.name)
        # Was "is not 'none'": identity comparison on string literals is
        # implementation-defined; use equality.
        if bone_match != 'none':
            ebp = goal.pose.bones[bone_match]
            matrix_os[bone_match] = matrix_the_hard_way(ebp, goal)
    print('DEBUG')
    # Pass 2: rotate each matched bone of arm onto the goal's rest frame.
    for to_pose in arm.pose.bones:
        bone_match = find_bone_match(bc, to_pose.name)
        if bone_match != 'none':
            goal_bone = bone_match
            if to_pose.parent is None:
                print(goal_bone)
                m1 = (arm.matrix_world @ matrix_os[goal_bone]) @ to_pose.bone.matrix_local
                (loc, rot, scale) = m1.decompose()
            else:
                mp = matrix_the_hard_way(to_pose.parent, arm) @ matrix_for_bone_from_parent(to_pose, arm)
                print(mp)
                m2 = mp.inverted() @ matrix_os[goal_bone]
                (loc, rot, scale) = m2.decompose()
            if 'QUATERNION' == to_pose.rotation_mode:
                to_pose.rotation_quaternion = rot
            else:
                to_pose.rotation_euler = rot.to_euler(to_pose.rotation_mode)
            print('last debug')
            print(rot)
            # NOTE(review): always keys 'rotation_euler', even for
            # quaternion-mode bones -- confirm this is intentional.
            to_pose.keyframe_insert('rotation_euler', frame=1, group=to_pose.name)
|
def set_rest_pose(skeleton):
    """Zero the rotation of every pose bone of *skeleton*.

    Forces each bone into XYZ Euler mode and sets its rotation to (0, 0, 0).
    """
    for pose_bone in skeleton.pose.bones:
        pose_bone.rotation_mode = 'XYZ'
        pose_bone.rotation_euler = (0, 0, 0)
|
def set_hips_origin(skeleton, hips_name):
    """Snap the named hips pose bone back to its rest location (zero offset)."""
    skeleton.pose.bones[hips_name].location = (0, 0, 0)
|
def find_scale_factor(skel, trg_skel, hips_name_skel, hips_name_target):
    # Ratio of world-space hip heights (Z axis) between target and source
    # skeletons, used to scale retargeted motion.
    hips_pos_skel = (skel.matrix_world @ Matrix.Translation(skel.pose.bones[hips_name_skel].head)).to_translation()
    hips_pos_targ = (trg_skel.matrix_world @ Matrix.Translation(trg_skel.pose.bones[hips_name_target].head)).to_translation()
    print(hips_pos_skel)
    print(hips_pos_targ)
    # NOTE(review): divides by the source hip height -- this is zero when the
    # source hips sit at Z=0; confirm inputs guarantee otherwise.
    return (hips_pos_targ[2] / hips_pos_skel[2])
|
def read_text_lines(filename):
    """Parse a bone-correspondence file into a list of [src, dst] pairs.

    NOTE(review): duplicate of the identical definition earlier in this file
    (this later one wins at import time); consider removing one copy.

    Each line is either ``src dst`` or a single token ``src`` (mapped to
    ``'none'``). Blank lines are skipped (the original raised IndexError on
    them). Lines with more than two tokens keep the first token and 'none',
    matching the original behavior.
    """
    list_bones = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as text_file:
        for line in text_file:
            line_split = line.split()
            if not line_split:
                continue  # blank line
            if len(line_split) == 2:
                list_bones.append([line_split[0], line_split[1]])
            else:
                list_bones.append([line_split[0], 'none'])
    return list_bones
|
def find_bone_match(list_bones, bone_name):
    """Return the mapped name for *bone_name*, or 'none' when absent.

    NOTE(review): duplicate of the identical definition earlier in this file
    (this later one wins at import time); consider removing one copy.
    """
    for pair in list_bones:
        if pair[0] == bone_name:
            return pair[1]
    return 'none'
|
class CocoDet(CocoDataset):
    # Wraps mmdet's CocoDataset as a region-category instruction dataset:
    # every ground-truth box becomes one "what is in <bbox>?" question whose
    # answer is the COCO category name.
    def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, vis_root=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=100):
        self.multimodal_cfg = multimodal_cfg
        self.tokenizer = tokenizer
        self.vis_root = vis_root  # COCO root containing annotations/ and *2017/
        self.vis_processor = vis_processor
        self.max_gt_per_img = max_gt_per_img  # cap on boxes used per image
        self.add_eos = add_eos
        self.ignore_instruction = ignore_instruction
        self.filter_small = filter_small
        self.test_mode = test_mode
        # CLIP pixel statistics scaled to the 0-255 range.
        img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
        train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32), dict(type='FilterAnnotations', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotations', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        if test_mode:
            pipeline = test_pipeline
        else:
            pipeline = train_pipeline
        # Select the val/train 2017 split matching test_mode.
        if test_mode:
            ann_file = f'{self.vis_root}/annotations/instances_val2017.json'
            img_prefix = (self.vis_root + '/val2017')
        else:
            ann_file = f'{self.vis_root}/annotations/instances_train2017.json'
            img_prefix = (self.vis_root + '/train2017')
        train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
        # Skip CocoDataset.__init__ and call its base class directly with the
        # assembled dataset kwargs.
        super(CocoDataset, self).__init__(**train)
        self.num_classes = len(self.CLASSES)
        # System prompt prefix listing every category name.
        begin_str = '<image>\nIn the conversation below, you simply answer the category name based on what you see in the imagery inside a particular region.I will give you only one region each time. Categories Containing '
        class_str = ', '.join(self.CLASSES)
        self.begin_str = ((begin_str + class_str) + '.\n')

    def train_process_test(self, data_item):
        """Turn one pipeline item into a tokenized multi-turn Q/A sample."""
        image = data_item['img'].data
        ori_labels = data_item['gt_labels'].data
        ori_bboxes = data_item['gt_bboxes'].data
        # Randomly subsample up to max_gt_per_img boxes (also shuffles order).
        shuffle_ids = torch.randperm(len(ori_labels))
        if (len(shuffle_ids) > self.max_gt_per_img):
            shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        ori_bboxes = ori_bboxes[shuffle_ids]
        ori_labels = ori_labels[shuffle_ids]
        sources = dict()
        sources['conversations'] = []
        # One Q/A turn per box; the first question carries the system prefix.
        for i in range(len(ori_labels)):
            question = random.choice(QUESTIONS).strip()
            question = question.replace('<spi_descript>', '<bbox>')
            if (i == 0):
                question = (self.begin_str + question)
            answer = self.CLASSES[ori_labels[i]]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        # Number of visual tokens for a 14x14-patch encoder on this image.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): relies on the loop variable i leaking out of the for
        # loop above; raises NameError when there are zero labels, and the
        # isinstance check is always True otherwise -- confirm intent.
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        # Normalize boxes to [0, 1] by the (square) image size.
        ori_bboxes = (copy.deepcopy(ori_bboxes) / image.shape[1])
        data_dict['bboxes'] = ori_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict

    def process_text(self, data_item):
        """Unwrap list-valued pipeline output, then build the sample."""
        if isinstance(data_item['img'], list):
            data_item = {k: v[0] for (k, v) in data_item.items()}
        return self.train_process_test(data_item)

    def tokenize(self, text):
        """Tokenize instruction+answer; mask the instruction part with -100."""
        res = self.tokenizer((text['instruction'] + text['answer']), return_tensors=None, padding='do_not_pad', truncation=True, max_length=512)
        # Append EOS when there is room and it is not already present.
        if ((res['input_ids'][(- 1)] != self.tokenizer.eos_token_id) and (len(res['input_ids']) < 512) and self.add_eos):
            res['input_ids'].append(self.tokenizer.eos_token_id)
            res['attention_mask'].append(1)
        labels = copy.deepcopy(res['input_ids'])
        if self.ignore_instruction:
            # Everything up to the first <bbox> token is instruction -> ignored.
            bbox_index = labels.index(self.tokenizer.encode('<bbox>')[1])
            labels[:bbox_index] = ([(- 100)] * bbox_index)
        res.update(labels=labels)
        return res

    def __getitem__(self, idx):
        data_item = super().__getitem__(idx)
        data_dict = self.process_text(data_item=data_item)
        return data_dict
|
@dataclass
class DataCollatorForDetDataset(object):
    """Collate per-sample dicts into one padded batch for detection tuning."""

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances):
        keys = ('input_ids', 'labels', 'img_metas', 'bboxes')
        input_ids, labels, img_metas, bboxes = (
            [inst.get(key, None) for inst in instances] for key in keys
        )
        pad_id = self.tokenizer.pad_token_id
        # Right-pad token sequences; labels use IGNORE_INDEX so padding is
        # excluded from the loss.
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids, batch_first=True, padding_value=pad_id)
        labels = torch.nn.utils.rnn.pad_sequence(
            labels, batch_first=True, padding_value=IGNORE_INDEX)
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(pad_id),
            img_metas=img_metas,
            bboxes=bboxes,
        )
        if 'image' in instances[0]:
            images = [inst['image'] for inst in instances]
            uniform = all(
                (x is not None) and (x.shape == images[0].shape) for x in images)
            # Stack only when every image tensor has the same shape.
            batch['images'] = torch.stack(images) if uniform else images
        return batch
|
def make_multitask_data_module(tokenizer, data_args):
    'Make dataset and collator for supervised fine-tuning.'
    if data_args.dataset_config is not None:
        dataset_config = Config.fromfile(data_args.dataset_config)
    # NOTE(review): dataset_config is unbound when data_args.dataset_config is
    # None, so the build below would raise NameError -- presumably callers
    # always provide it; preserved as-is.
    multimodal_cfg = dict(
        is_multimodal=data_args.is_multimodal,
        sep_image_conv_front=data_args.sep_image_conv_front,
        image_token_len=data_args.image_token_len,
        image_aspect_ratio=data_args.image_aspect_ratio,
        use_im_start_end=getattr(data_args, 'mm_use_im_start_end', False),
        image_processor=getattr(data_args, 'image_processor', None),
    )
    train_dataset = build_spi_dataset(
        dataset_config.spi_datasets,
        tokenizer=tokenizer,
        multimodal_cfg=multimodal_cfg,
    )
    collator = DataCollatorForDetDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=collator)
|
def build_spi_dataset(dataset_config, tokenizer=None, multimodal_cfg=None, **kwargs):
    """Build one dataset (or a ConcatDataset for a list of configs).

    A list config recurses per entry and concatenates the results. A single
    config is dispatched on its 'type' key; an optional 'ratio' < 1 subsamples
    the built dataset. Raises NotImplementedError for unknown types.
    """
    if isinstance(dataset_config, list):
        built = [
            build_spi_dataset(cfg, tokenizer=tokenizer,
                              multimodal_cfg=multimodal_cfg, **kwargs)
            for cfg in dataset_config
        ]
        type_string = [type(item) for item in built]
        print('#' * 20, type_string, '#' * 20)
        for dataset in built:
            print('#' * 20, type(dataset), f'len = {len(dataset)}', '#' * 20)
        return ConcatDataset(built)

    dataset_type = dataset_config.pop('type')
    ratio = dataset_config.pop('ratio', 1)
    # Dispatch table replaces the original if/elif ladder; same classes,
    # same NotImplementedError for unknown types.
    dataset_classes = {
        'coco_det': CocoDet,
        'flickr30k': Flickr30k,
        'VGDATA': VGDATA,
        'det_llava': DetLLava,
        'vcr': VCRDataset,
        'single_vcr': SingleVCRDataset,
        'multi_vcr': MultiVCRDataset,
        'RefCOCO': RefCOCO,
        'RefCOCOP': RefCOCOP,
        'RefCOCOG': RefCOCOG,
    }
    if dataset_type not in dataset_classes:
        raise NotImplementedError
    dataset = dataset_classes[dataset_type](
        **dataset_config, tokenizer=tokenizer,
        multimodal_cfg=multimodal_cfg, **kwargs)

    if ratio < 1:
        print(f'randomly sample {ratio} of the dataset {dataset_type}: {int(ratio * len(dataset))}')
        random_indices = np.random.choice(len(dataset), int(ratio * len(dataset)), replace=False)
        subsample_dataset = torch.utils.data.Subset(dataset, random_indices)
        # Keep the custom collater reachable on the Subset wrapper.
        subsample_dataset.collater = dataset.collater
        return subsample_dataset
    return dataset
|
class ConcatDataset(ConcatDataset):
    """Concatenation wrapper that collates on the shared sample keys only."""

    def __init__(self, datasets):
        super().__init__(datasets)

    def collater(self, samples):
        """Strip each sample to the keys present in ALL samples, then delegate
        to the first child dataset's collater."""
        # Running intersection of key sets (equivalent to the original
        # union-then-intersect sequence).
        shared_keys = set()
        for idx, sample in enumerate(samples):
            keys = set(sample.keys())
            shared_keys = keys if idx == 0 else (shared_keys & keys)
        filtered = [
            {key: sample[key] for key in sample.keys() if key in shared_keys}
            for sample in samples
        ]
        return self.datasets[0].collater(filtered)
|
class Flickr30k(CocoDataset):
    # Flickr30k Entities grounding dataset: each image yields one captioning
    # turn covering all regions, followed by one description turn per region.
    CLASSES = ('object',)

    def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, ann_file=None, img_prefix=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=150):
        self.multimodal_cfg = multimodal_cfg
        self.tokenizer = tokenizer
        self.ann_file = ann_file
        self.img_prefix = img_prefix
        self.vis_processor = vis_processor
        self.max_gt_per_img = max_gt_per_img  # cap on regions per image
        self.add_eos = add_eos
        self.ignore_instruction = ignore_instruction
        self.filter_small = filter_small
        self.test_mode = test_mode
        # CLIP pixel statistics scaled to the 0-255 range.
        img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
        train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        if test_mode:
            pipeline = test_pipeline
        else:
            pipeline = train_pipeline
        # NOTE(review): both branches pick the same ann_file/img_prefix -- the
        # train/test split here is vestigial; kept for parity.
        if test_mode:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        else:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
        # Skip CocoDataset.__init__ and call its base class directly.
        super(CocoDataset, self).__init__(**train)
        self.num_classes = len(self.CLASSES)
        self.id_cap_dict = dict()  # image filename -> full caption
        self.begin_str = 'The <image> provides an overview of the picture.\n'

    def _filter_imgs(self, min_size=32):
        'Filter images too small or without ground truths.'
        valid_inds = []
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def load_annotations(self, ann_file):
        'Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n '
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            # Some annotation files store sizes as strings; force ints.
            info['height'] = int(info['height'])
            info['width'] = int(info['width'])
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert (len(set(total_ann_ids)) == len(total_ann_ids)), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def _parse_ann_info(self, img_info, ann_info):
        'Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.\n '
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        # Remember the image caption so train_process_test() can answer with it.
        self.id_cap_dict[img_info['file_name']] = img_info['caption']
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Clip the box to the image; skip boxes with no visible area.
            inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0)))
            inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0)))
            if ((inter_w * inter_h) == 0):
                continue
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] in self.cat_ids):
                pass
            else:
                raise ValueError('category_id not in self.cat_ids')
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                # Label = first caption phrase referring to this region.
                gt_list = [img_info['caption'][atp[0]:atp[1]] for atp in ann['tokens_positive']]
                gt_labels.append(gt_list[0])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, caption=img_info['caption'], bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
        return ann

    def process_text(self, data_item):
        # Unwrap list-valued pipeline output, then build the sample.
        if isinstance(data_item['img'], list):
            data_item = {k: v[0] for (k, v) in data_item.items()}
        return self.train_process_test(data_item)

    def train_process_test(self, data_item):
        """Build the tokenized conversation: one caption turn over all regions
        plus one description turn per (shuffled) region."""
        image = data_item['img'].data
        ori_labels = data_item['gt_labels']
        ori_bboxes = data_item['gt_bboxes'].data
        sources = {'conversations': []}
        # Turn 1: global captioning question listing every region placeholder.
        question = random.choice(FINAL_QUESTIONS).strip()
        s_bbox_string = ''
        num_bboxes = min(len(ori_labels), self.max_gt_per_img)
        for id in range(num_bboxes):
            s_bbox_string = (s_bbox_string + f'region{(id + 1)} <bbox>,')
        question = question.replace('<spi_descript>', s_bbox_string)
        sources['conversations'].append({'from': 'human', 'value': question})
        # Answer: the stored full caption, keyed by the image basename.
        sources['conversations'].append({'from': 'gpt', 'value': self.id_cap_dict[data_item['img_metas'].data['filename'].split('/')[(- 1)]]})
        # Remaining turns: per-region description Q/A over a random subset.
        shuffle_ids = torch.randperm(len(ori_labels))
        shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        select_bboxes = ori_bboxes[shuffle_ids]
        select_labels = [ori_labels[i] for i in shuffle_ids]
        for i in range(len(select_labels)):
            question = random.choice(REGION_QUESTIONS).strip()
            question = question.replace('<spi_descript>', f'region {(i + 1)}')
            answer = select_labels[i]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
        # Number of visual tokens for a 14x14-patch encoder on this image.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): relies on the loop variable i leaking out of the for
        # loop above; raises NameError when there are zero regions -- confirm.
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        select_bboxes = torch.cat([select_bboxes], dim=0)
        # Normalize boxes to [0, 1] by the (square) image size.
        select_bboxes = (copy.deepcopy(select_bboxes) / image.shape[1])
        data_dict['bboxes'] = select_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict

    def __getitem__(self, idx):
        data_item = super().__getitem__(idx)
        # Resample up to max_loops times until an item with ground truth
        # labels is found; give up with an error after that.
        max_loops = 10
        i = 0
        while True:
            if (i > max_loops):
                raise ValueError('No gt_labels')
            i += 1
            if (len(data_item['gt_labels']) == 0):
                idx = random.randint(0, (len(self) - 1))
                data_item = super().__getitem__(idx)
            else:
                break
        data_dict = self.process_text(data_item=data_item)
        return data_dict
|
class RefCOCO(CocoDataset):
CLASSES = ('object',)
def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, ann_file=None, img_prefix=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=15):
self.multimodal_cfg = multimodal_cfg
self.tokenizer = tokenizer
self.ann_file = ann_file
self.img_prefix = img_prefix
self.vis_processor = vis_processor
self.max_gt_per_img = max_gt_per_img
self.add_eos = add_eos
self.ignore_instruction = ignore_instruction
self.filter_small = filter_small
self.test_mode = test_mode
img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
if test_mode:
pipeline = test_pipeline
else:
pipeline = train_pipeline
if test_mode:
ann_file = self.ann_file
img_prefix = self.img_prefix
else:
ann_file = self.ann_file
img_prefix = self.img_prefix
train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
super(CocoDataset, self).__init__(**train)
self.num_classes = len(self.CLASSES)
self.id_cap_dict = dict()
self.begin_str = "<image>\n I will provide you with only one region containing only one object, although there may be other objects present in the image. It is recommended that you describe the object's relative position with respect to other objects in the image, as well as its position within the image and its basic attributes."
def _filter_imgs(self, min_size=32):
'Filter images too small or without ground truths.'
valid_inds = []
valid_img_ids = []
for (i, img_info) in enumerate(self.data_infos):
img_id = self.img_ids[i]
if (min(img_info['width'], img_info['height']) >= min_size):
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def load_annotations(self, ann_file):
    """Load annotation from COCO style annotation file.

    Images whose caption has fewer than three words are skipped.

    Args:
        ann_file (str): Path of annotation file.

    Returns:
        list[dict]: Annotation info from COCO api.
    """
    self.coco = COCO(ann_file)
    self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
    self.cat2label = {cat_id: label for label, cat_id in enumerate(self.cat_ids)}
    self.img_ids = self.coco.get_img_ids()
    data_infos = []
    total_ann_ids = []
    num_remove_images = 0
    for img_id in self.img_ids:
        info = self.coco.load_imgs([img_id])[0]
        # Drop images whose caption is too short to be a useful target.
        if len(info['caption'].split(' ')) < 3:
            num_remove_images += 1
            continue
        # Keep only the trailing chunk of the file name (presumably strips a
        # 'COCO_<split>_' prefix -- TODO confirm against the annotation file).
        info['filename'] = info['file_name'].split('_')[-1]
        info['height'] = int(info['height'])
        info['width'] = int(info['width'])
        data_infos.append(info)
        total_ann_ids.extend(self.coco.get_ann_ids(img_ids=[img_id]))
    assert len(set(total_ann_ids)) == len(total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
    print(f'Filtered {num_remove_images} from {self.ann_file} ')
    return data_infos
def _parse_ann_info(self, img_info, ann_info):
'Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.\n '
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
img_path = os.path.join(self.img_prefix, img_info['file_name'].split('_')[(- 1)])
self.id_cap_dict[img_info['file_name'].split('_')[(- 1)]] = img_info['caption']
for (i, ann) in enumerate(ann_info):
if ann.get('ignore', False):
continue
(x1, y1, w, h) = ann['bbox']
inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0)))
inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0)))
if ((inter_w * inter_h) == 0):
continue
if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
continue
bbox = [x1, y1, (x1 + w), (y1 + h)]
gt_bboxes.append(bbox)
gt_labels.append(img_info['caption'])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(bboxes=gt_bboxes, labels=gt_labels, caption=img_info['caption'], bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
return ann
def process_text(self, data_item):
    """Unwrap batched pipeline output and delegate to train_process_test.

    mmdet pipelines may wrap every field in a single-element list; if so,
    take element 0 of each field before building the sample.
    """
    item = data_item
    if isinstance(item['img'], list):
        item = {key: value[0] for key, value in item.items()}
    return self.train_process_test(item)
def train_process_test(self, data_item):
    """Convert one mmdet pipeline item into a tokenized training sample.

    Builds a (question, caption) conversation for up to
    ``self.max_gt_per_img`` randomly chosen ground-truth boxes, prepends the
    dataset instruction, tokenizes, and returns the tensors the collator
    expects. The caller (``__getitem__``) guarantees at least one gt label.

    Args:
        data_item (dict): Pipeline output with ``img``, ``gt_labels``,
            ``gt_bboxes`` and ``img_metas`` fields.

    Returns:
        dict: ``input_ids``, ``labels``, ``image``, ``bboxes`` (normalized
        by the square image side) and ``img_metas``.
    """
    image = data_item['img'].data
    ori_labels = data_item['gt_labels']
    ori_bboxes = data_item['gt_bboxes'].data
    # Randomly subsample at most max_gt_per_img regions per image.
    shuffle_ids = torch.randperm(len(ori_labels))
    if len(shuffle_ids) > self.max_gt_per_img:
        shuffle_ids = shuffle_ids[:self.max_gt_per_img]
    select_bboxes = ori_bboxes[shuffle_ids]
    select_labels = [ori_labels[i] for i in shuffle_ids]
    sources = {'conversations': []}
    for i in range(len(select_labels)):
        question = random.choice(QUESTIONS).strip()
        question = question.replace('<spi_descript>', '<bbox>')
        answer = select_labels[i]
        sources['conversations'].append({'from': 'human', 'value': question})
        sources['conversations'].append({'from': 'gpt', 'value': answer})
    # Prepend the fixed task instruction to the first human turn.
    sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
    # One token per 14x14 ViT patch.
    cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
    assert (image.shape[1] == image.shape[2])
    sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
    data_dict = preprocess(sources, self.tokenizer)
    # preprocess() batches over sources; a single conversation was passed, so
    # unwrap index 0. (The original guarded this on a stale loop variable
    # `i`, which was always an int here -- dead check removed.)
    data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
    data_dict['image'] = image
    # Normalize box coordinates by the (square) image side length.
    data_dict['bboxes'] = (copy.deepcopy(select_bboxes) / image.shape[1])
    data_dict['img_metas'] = data_item['img_metas'].data
    return data_dict
def __getitem__(self, idx):
    """Fetch a pipeline-processed item, resampling if it has no gt labels.

    Retries with a random index up to ``max_loops`` times (the original
    while-loop had an off-by-one that allowed 11 resamples) and raises if no
    labeled item is found.

    Raises:
        ValueError: If every sampled item has empty ``gt_labels``.
    """
    data_item = super().__getitem__(idx)
    max_loops = 10
    for _ in range(max_loops):
        if len(data_item['gt_labels']) > 0:
            break
        # Empty item: resample a random index and try again.
        idx = random.randint(0, len(self) - 1)
        data_item = super().__getitem__(idx)
    if len(data_item['gt_labels']) == 0:
        raise ValueError('No gt_labels')
    return self.process_text(data_item=data_item)
|
class RefCOCOP(RefCOCO):
    """RefCOCO variant that differs from :class:`RefCOCO` only in the prompt.

    The instruction asks for relative position and attributes but forbids
    absolute position within the image (presumably matching the RefCOCO+
    annotation style -- TODO confirm).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): the 'attibuts' typo is part of the runtime prompt
        # string; left untouched because trained checkpoints may depend on
        # the exact wording.
        self.begin_str = "<image>\n I will provide you with only one region containing only one object, although there may be other objects present in the image. It is recommended that you describe the object's relative position with respect to other objects in the image and its basic attibuts, you should not give its position within the image"
|
class RefCOCOG(RefCOCO):
    """RefCOCO variant using REFG_QUESTIONS with numbered region placeholders
    and a shorter instruction prefix."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.begin_str = 'The <image> provides an overview of the picture.\n'

    def train_process_test(self, data_item):
        """Build a multi-region conversation sample from one pipeline item.

        Returns a dict with input_ids, labels, image, normalized bboxes and
        img_metas; the caller guarantees at least one gt label.
        """
        image = data_item['img'].data
        ori_labels = data_item['gt_labels']
        ori_bboxes = data_item['gt_bboxes'].data
        sources = {'conversations': []}
        # Randomly subsample at most max_gt_per_img regions per image.
        shuffle_ids = torch.randperm(len(ori_labels))
        if (len(shuffle_ids) > self.max_gt_per_img):
            shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        select_bboxes = ori_bboxes[shuffle_ids]
        select_labels = [ori_labels[i] for i in shuffle_ids]
        for i in range(len(select_labels)):
            question = random.choice(REFG_QUESTIONS).strip()
            # Questions reference a numbered region placeholder per box.
            question = question.replace('<spi_descript>', f'region{(i + 1)} <bbox>')
            answer = select_labels[i]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        # Prepend the fixed instruction to the first human turn.
        sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
        # One token per 14x14 ViT patch; image must be square.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): `i` here is the stale loop variable, so this check is
        # always true when at least one region exists; it looks copied from a
        # dataset whose __getitem__ index is named `i`.
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        ori_bboxes = select_bboxes
        # Normalize box coordinates by the (square) image side length.
        ori_bboxes = (copy.deepcopy(ori_bboxes) / image.shape[1])
        data_dict['bboxes'] = ori_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict
|
class VGDATA(CocoDataset):
    """Visual Genome region-description dataset served as conversations.

    Each item pairs up to ``max_gt_per_img`` region boxes with their region
    captions; regions are referenced in the prompt as 'region<k> <bbox>'.
    """
    # Single foreground class: every region is just an 'object'.
    CLASSES = ('object',)

    def __init__(self, tokenizer, multimodal_cfg=None, vis_processor=None, ann_file=None, img_prefix=None, add_eos=True, ignore_instruction=True, filter_small=False, test_mode=False, max_gt_per_img=15):
        self.multimodal_cfg = multimodal_cfg
        self.tokenizer = tokenizer
        self.ann_file = ann_file
        self.img_prefix = img_prefix
        self.vis_processor = vis_processor
        # Upper bound on regions (conversation rounds) per image.
        self.max_gt_per_img = max_gt_per_img
        self.add_eos = add_eos
        self.ignore_instruction = ignore_instruction
        self.filter_small = filter_small
        self.test_mode = test_mode
        # CLIP mean/std scaled to the 0-255 pixel range.
        img_norm_cfg = dict(mean=[(0.48145466 * 255), (0.4578275 * 255), (0.40821073 * 255)], std=[(0.26862954 * 255), (0.26130258 * 255), (0.27577711 * 255)], to_rgb=True)
        # Train/test pipelines differ only in Pad.size_divisor (1 vs 224).
        train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=1), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        test_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(224, 224), keep_ratio=False), dict(type='FilterAnnotationsFlickr', min_gt_bbox_wh=(2.0, 2.0)), dict(type='RandomFlip', flip_ratio=0.0), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=224), dict(type='DefaultFormatBundleFlickr'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
        if test_mode:
            pipeline = test_pipeline
        else:
            pipeline = train_pipeline
        # NOTE(review): both branches are identical -- ann_file/img_prefix do
        # not actually depend on test_mode.
        if test_mode:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        else:
            ann_file = self.ann_file
            img_prefix = self.img_prefix
        train = dict(ann_file=ann_file, img_prefix=img_prefix, test_mode=False, pipeline=pipeline)
        # NOTE(review): deliberately skips CocoDataset.__init__ and calls its
        # base class initializer instead -- presumably to bypass CocoDataset's
        # own setup; confirm against the mmdet version in use.
        super(CocoDataset, self).__init__(**train)
        self.num_classes = len(self.CLASSES)
        self.begin_str = 'The <image> provides an overview of the picture.\n'

    def _filter_imgs(self, min_size=32):
        'Filter images too small or without ground truths.'
        valid_inds = []
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            # Keep only images whose shorter side is at least min_size; also
            # rebuild img_ids so it stays aligned with data_infos.
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def load_annotations(self, ann_file):
        'Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n '
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for (i, cat_id) in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            # Unlike RefCOCO.load_annotations, file names are used as-is and
            # no caption-length filtering is applied.
            info['filename'] = info['file_name']
            info['height'] = int(info['height'])
            info['width'] = int(info['width'])
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert (len(set(total_ann_ids)) == len(total_ann_ids)), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def _parse_ann_info(self, img_info, ann_info):
        'Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. "masks" are raw annotations and not decoded into binary masks.\n '
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Intersection with the image canvas; drop fully-outside boxes.
            inter_w = max(0, (min((x1 + w), img_info['width']) - max(x1, 0)))
            inter_h = max(0, (min((y1 + h), img_info['height']) - max(y1, 0)))
            if ((inter_w * inter_h) == 0):
                continue
            # Drop degenerate boxes and unknown categories.
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] not in self.cat_ids):
                continue
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                # Labels are per-region VG captions (not class indices).
                gt_labels.append(ann['caption'])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map)
        return ann

    def process_text(self, data_item):
        # mmdet pipelines may wrap each field in a single-element list.
        if isinstance(data_item['img'], list):
            data_item = {k: v[0] for (k, v) in data_item.items()}
        return self.train_process_test(data_item)

    def train_process_test(self, data_item):
        """Build a multi-region conversation sample from one pipeline item."""
        image = data_item['img'].data
        ori_labels = data_item['gt_labels']
        ori_bboxes = data_item['gt_bboxes'].data
        sources = {'conversations': []}
        # Randomly subsample at most max_gt_per_img regions per image.
        shuffle_ids = torch.randperm(len(ori_labels))
        if (len(shuffle_ids) > self.max_gt_per_img):
            shuffle_ids = shuffle_ids[:self.max_gt_per_img]
        select_bboxes = ori_bboxes[shuffle_ids]
        select_labels = [ori_labels[i] for i in shuffle_ids]
        for i in range(len(select_labels)):
            question = random.choice(FINAL_QUESTIONS).strip()
            question = question.replace('<spi_descript>', f'region{(i + 1)} <bbox>')
            answer = select_labels[i]
            sources['conversations'].append({'from': 'human', 'value': question})
            sources['conversations'].append({'from': 'gpt', 'value': answer})
        # Prepend the fixed instruction to the first human turn.
        sources['conversations'][0]['value'] = (self.begin_str + sources['conversations'][0]['value'])
        # One token per 14x14 ViT patch; image must be square.
        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
        assert (image.shape[1] == image.shape[2])
        sources = preprocess_multimodal(copy.deepcopy([sources['conversations']]), self.multimodal_cfg, cur_token_len)
        data_dict = preprocess(sources, self.tokenizer)
        # NOTE(review): `i` is the stale loop variable; this check is always
        # true when at least one region exists (copied-over guard).
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        data_dict['image'] = image
        # Normalize box coordinates by the (square) image side length.
        select_bboxes = (copy.deepcopy(select_bboxes) / image.shape[1])
        data_dict['bboxes'] = select_bboxes
        data_dict['img_metas'] = data_item['img_metas'].data
        return data_dict

    def __getitem__(self, idx):
        """Fetch an item, resampling randomly if it has no gt labels."""
        data_item = super().__getitem__(idx)
        max_loops = 10
        i = 0
        while True:
            # NOTE(review): increment-after-check allows up to 11 resamples
            # for max_loops=10 (off-by-one, harmless in practice).
            if (i > max_loops):
                raise ValueError('No gt_labels')
            i += 1
            if (len(data_item['gt_labels']) == 0):
                idx = random.randint(0, (len(self) - 1))
                data_item = super().__getitem__(idx)
            else:
                break
        data_dict = self.process_text(data_item=data_item)
        return data_dict
|
def forward(self, hidden_states: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[(torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]])]:
    """Flash-attention replacement for LlamaAttention.forward.

    Input shape: Batch x Time x Channel.

    attention_mask: [bsz, q_len] key-padding mask (2-D; the patched
    _prepare_decoder_attention_mask returns it un-expanded).

    Returns (attn_output, None, None): attention probabilities and KV cache
    are not supported by this implementation (asserted below).
    """
    (bsz, q_len, _) = hidden_states.size()
    # Project and split into heads: [bsz, num_heads, q_len, head_dim].
    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    kv_seq_len = key_states.shape[(- 2)]
    offset = 0
    if (past_key_value is not None):
        offset = past_key_value[0].shape[(- 2)]
        kv_seq_len += offset
    # Rotary position embeddings, shifted by the cache offset (the cache
    # path is asserted unsupported below, so offset is effectively 0).
    (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, offset=offset)
    assert (not output_attentions), 'output_attentions is not supported'
    assert (not use_cache), 'use_cache is not supported'
    assert (past_key_value is None), 'past_key_value is not supported'
    # Pack Q/K/V together, then move the "3" axis next to heads:
    # [bsz, q_len, 3, num_heads, head_dim], as flash-attn expects.
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)
    key_padding_mask = attention_mask
    if (key_padding_mask is None):
        # No padding: flatten the batch and use uniform cu_seqlens offsets.
        qkv = rearrange(qkv, 'b s ... -> (b s) ...')
        max_s = q_len
        cu_q_lens = torch.arange(0, ((bsz + 1) * q_len), step=q_len, dtype=torch.int32, device=qkv.device)
        output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(output, '(b s) ... -> b s ...', b=bsz)
    else:
        # Padded batch: strip pad tokens, run flash-attn on the packed
        # sequences, then scatter the outputs back to padded layout.
        nheads = qkv.shape[(- 2)]
        x = rearrange(qkv, 'b s three h d -> b s (three h d)')
        (x_unpad, indices, cu_q_lens, max_s) = unpad_input(x, key_padding_mask)
        x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
        output_unpad = flash_attn_unpadded_qkvpacked_func(x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices, bsz, q_len), 'b s (h d) -> b s h d', h=nheads)
    return (self.o_proj(rearrange(output, 'b s h d -> b s (h d)')), None, None)
|
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
    # Monkey-patch replacement for LlamaModel._prepare_decoder_attention_mask:
    # the flash-attn forward above consumes the raw [bsz, seq_len] padding
    # mask directly (causality is handled inside flash-attn), so the usual
    # expansion to a 4-D causal mask is skipped.
    return attention_mask
|
def replace_llama_attn_with_flash_attn():
    """Globally monkey-patch transformers' LLaMA attention to use flash-attn.

    Must be called before any LlamaModel is instantiated. After patching,
    output_attentions / use_cache / past_key_value are unsupported (see the
    asserts in the patched forward).
    """
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
|
def unwrap_model(model: nn.Module) -> nn.Module:
    """Unwrap a model from potential containers (as used in distributed
    training) by following nested ``.module`` attributes.

    Args:
        model (`torch.nn.Module`): The model to unwrap.

    Returns:
        The innermost module that has no ``.module`` attribute.
    """
    # Iterative equivalent of the usual recursive unwrap.
    while hasattr(model, 'module'):
        model = model.module
    return model
|
class LLaVATrainer(Trainer):
    """HF Trainer with two customizations:

    1. ``_save`` additionally dumps just the multimodal projector / embedding
       weights when only the mm adapter is being tuned.
    2. ``create_optimizer`` can freeze everything except the SPI (and
       optionally projector) parameters, controlled by the ONLY_SPI / PROJ
       environment variables.
    """

    def _save(self, output_dir: Optional[str]=None, state_dict=None):
        # When only the mm adapter is tuned, save the projector/embedding
        # weights separately so they can be loaded standalone.
        if getattr(self.args, 'tune_mm_mlp_adapter', False):
            _state_dict = state_dict
            if (_state_dict is None):
                # Only save the model itself, not the optimizer/scheduler.
                model_to_save = unwrap_model(self.model)
                _state_dict = model_to_save.state_dict()
            weight_to_save = {}
            # Substring match against parameter names.
            keys_to_match = ['mm_projector', 'embed_tokens', 'embed_in']
            for (k, v) in _state_dict.items():
                if any(((key_match in k) for key_match in keys_to_match)):
                    weight_to_save[k] = v
            current_folder = output_dir.split('/')[(- 1)]
            parent_folder = os.path.dirname(output_dir)
            if current_folder.startswith('checkpoint-'):
                # Collect all intermediate projector checkpoints in a single
                # sibling 'mm_projector' folder, one .bin per checkpoint.
                mm_projector_folder = os.path.join(parent_folder, 'mm_projector')
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
        # Always fall through to the regular HF save as well.
        super(LLaVATrainer, self)._save(output_dir, state_dict)

    def create_optimizer(self):
        """Build the optimizer, mirroring transformers.Trainer.create_optimizer
        but with env-var-controlled parameter groups.

        ONLY_SPI set, PROJ unset: only 'spi_module' params train; everything
        else gets lr=0. ONLY_SPI and PROJ set: 'spi_module' and 'proj' params
        train. Otherwise: standard weight-decay grouping.
        """
        opt_model = (self.model_wrapped if is_sagemaker_mp_enabled() else self.model)
        if (self.optimizer is None):
            decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
            decay_parameters = [name for name in decay_parameters if ('bias' not in name)]
            train_str = 'spi_module'
            if (os.environ.get('ONLY_SPI', None) and (not os.environ.get('PROJ', None))):
                # Freeze everything except spi_module by giving it lr=0.
                optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if ((train_str in n) and p.requires_grad)], 'weight_decay': 0.01}, {'params': [p for (n, p) in opt_model.named_parameters() if ((train_str not in n) and p.requires_grad)], 'weight_decay': 0.0, 'lr': 0.0}]
            elif (os.environ.get('ONLY_SPI', None) and os.environ.get('PROJ', None)):
                proj_train_str = 'proj'
                spi_train_str = 'spi_module'
                print('Only training SPI and PROJ')
                optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if (((spi_train_str in n) or (proj_train_str in n)) and p.requires_grad)], 'weight_decay': 0.0}, {'params': [p for (n, p) in opt_model.named_parameters() if (((proj_train_str not in n) and (spi_train_str not in n)) and p.requires_grad)], 'weight_decay': 0.0, 'lr': 0.0}]
            else:
                # Default HF behavior: decay everything except biases/LayerNorm.
                optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if ((n in decay_parameters) and p.requires_grad)], 'weight_decay': self.args.weight_decay}, {'params': [p for (n, p) in opt_model.named_parameters() if ((n not in decay_parameters) and p.requires_grad)], 'weight_decay': 0.0}]
            (optimizer_cls, optimizer_kwargs) = Trainer.get_optimizer_cls_and_kwargs(self.args)
            if (self.sharded_ddp == ShardedDDPOption.SIMPLE):
                if is_fairscale_available():
                    from fairscale.optim import OSS
                else:
                    raise ImportError()
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
                if (optimizer_cls.__name__ == 'Adam8bit'):
                    import bitsandbytes
                    manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
                    skipped = 0
                    for module in opt_model.modules():
                        if isinstance(module, nn.Embedding):
                            # Keep embeddings in fp32 under 8-bit Adam for
                            # numerical stability.
                            skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                            print(f'skipped {module}: {(skipped / (2 ** 20))}M params')
                            manager.register_module_override(module, 'weight', {'optim_bits': 32})
                            logger.debug(f'bitsandbytes: will optimize {module} in fp32')
                    print(f'skipped: {(skipped / (2 ** 20))}M params')
        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(self.optimizer)
        return self.optimizer
|
@dataclass
class ModelArguments():
    """Command-line arguments selecting and configuring the model."""
    # Hub id or local path of the language-model backbone.
    model_name_or_path: Optional[str] = field(default='facebook/opt-125m')
    # Conversation template version (e.g. 'v0', 'v1', 'mpt').
    version: Optional[str] = field(default='v0')
    # Freeze the LM backbone during training.
    freeze_backbone: bool = field(default=False)
    # Train only the multimodal MLP adapter (see LLaVATrainer._save).
    tune_mm_mlp_adapter: bool = field(default=False)
    # Vision tower identifier; None disables vision input.
    vision_tower: Optional[str] = field(default=None)
    # Which vision-tower layer to take features from (-1 presumably = last).
    mm_vision_select_layer: Optional[int] = field(default=(- 1))
    # Optional checkpoint to initialize the multimodal projector from.
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    # Wrap image patch tokens with explicit start/end tokens.
    mm_use_im_start_end: bool = field(default=False)
    # Enable the spatial-information (SPI) module.
    with_spi: bool = field(default=True)
    # Optional checkpoint to load model weights from.
    load_from: Optional[str] = field(default=None)
|
@dataclass
class DataArguments():
    """Command-line arguments controlling dataset loading/preprocessing."""
    # Tokenize lazily per item (LazySupervisedDataset) instead of up front.
    lazy_preprocess: bool = False
    # Whether samples include images.
    is_multimodal: bool = False
    # Put the image token at the front of the first conversation turn.
    sep_image_conv_front: bool = False
    # Number of patch tokens an image expands to.
    image_token_len: int = 0
    # One of 'square', 'keep', 'pad' -- see LazySupervisedDataset.__getitem__.
    image_aspect_ratio: str = 'square'
    dataset_config: Optional[str] = field(default='./gpt4roi/configs/stage1.py', metadata={'help': 'Path to the dataset config file.'})
|
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """transformers.TrainingArguments extended with project-specific flags."""
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default='adamw_torch')
    # HF Trainer drops dataset columns unknown to the model unless disabled;
    # the custom collator needs the extra fields (bboxes, img_metas, ...).
    remove_unused_columns: bool = field(default=False)
    # Keep the multimodal MLP adapter frozen during training.
    freeze_mm_mlp_adapter: bool = field(default=False)
    force_fsdp: bool = field(default=False)
    model_max_length: int = field(default=512, metadata={'help': 'Maximum sequence length. Sequences will be right padded (and possibly truncated).'})
|
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collects the state dict and dump to disk."""
    state_dict = trainer.model.state_dict()
    # Only the designated process writes the checkpoint.
    if not trainer.args.should_save:
        return
    # Move tensors to CPU before serialization so device memory is not
    # captured in the checkpoint, then drop the original references.
    cpu_state_dict = {name: tensor.cpu() for name, tensor in state_dict.items()}
    del state_dict
    trainer._save(output_dir, state_dict=cpu_state_dict)
|
def smart_tokenizer_and_embedding_resize(special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))
    if num_new_tokens == 0:
        return
    # Initialize every new row with the mean of the pre-existing rows so the
    # fresh tokens start from a "typical" embedding rather than random noise.
    for embeddings in (model.get_input_embeddings().weight.data,
                       model.get_output_embeddings().weight.data):
        mean_embedding = embeddings[:(-num_new_tokens)].mean(dim=0, keepdim=True)
        embeddings[(-num_new_tokens):] = mean_embedding
|
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize a list of strings."""
    tokenized_list = []
    for text in strings:
        tokenized_list.append(tokenizer(text, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True))
    # Each call tokenizes exactly one string, so take batch row 0. input_ids
    # and labels intentionally alias the same tensors; callers deepcopy
    # before mutating labels.
    input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
    # True (unpadded) lengths, counted as non-pad tokens.
    input_ids_lens = labels_lens = [tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list]
    return dict(input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens)
|
def _mask_targets(target, tokenized_lens, speakers):
    """In-place: mask the header and every human turn with IGNORE_INDEX.

    Args:
        target: 1-D tensor of label ids, modified in place.
        tokenized_lens: token length of the header followed by one entry per
            conversation turn.
        speakers: speaker tag ('human'/'gpt') per turn, aligned with
            tokenized_lens[1:].
    """
    # The leading system header never contributes to the loss.
    cur_idx = tokenized_lens[0]
    target[:cur_idx] = IGNORE_INDEX
    for tokenized_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == 'human':
            # The +2 offset presumably keeps the leading role-signal tokens
            # supervised -- mirrors upstream LLaVA; TODO confirm against the
            # tokenizer in use.
            target[cur_idx + 2:cur_idx + tokenized_len] = IGNORE_INDEX
        cur_idx += tokenized_len
|
def _add_speaker_and_signal(header, source, get_conversation=True):
'Add speaker and start/end signal on each round.'
BEGIN_SIGNAL = '### '
END_SIGNAL = '\n'
conversation = header
for sentence in source:
from_str = sentence['from']
if (from_str.lower() == 'human'):
from_str = conversation_lib.default_conversation.roles[0]
elif (from_str.lower() == 'gpt'):
from_str = conversation_lib.default_conversation.roles[1]
else:
from_str = 'unknown'
sentence['value'] = ((((BEGIN_SIGNAL + from_str) + ': ') + sentence['value']) + END_SIGNAL)
if get_conversation:
conversation += sentence['value']
conversation += BEGIN_SIGNAL
return conversation
|
def preprocess_multimodal(sources: Sequence[str], multimodal_cfg: dict, cur_token_len: int) -> Dict:
    """Expand image placeholders into ``cur_token_len`` patch tokens.

    Each DEFAULT_IMAGE_TOKEN occurrence in every conversation is replaced by
    a run of patch tokens (optionally wrapped in start/end tokens). Sources
    are modified in place and also returned.
    """
    if not multimodal_cfg['is_multimodal']:
        # Text-only setup: nothing to rewrite.
        return sources
    image_token_len = cur_token_len
    for source in sources:
        if multimodal_cfg['sep_image_conv_front']:
            # Move the image token to the very front of the first turn.
            assert DEFAULT_IMAGE_TOKEN in source[0]['value']
            source[0]['value'] = source[0]['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
            source[0]['value'] = (DEFAULT_IMAGE_TOKEN + conversation_lib.default_conversation.sep + conversation_lib.default_conversation.roles[0] + ': ' + source[0]['value'])
        # The replacement string is loop-invariant; build it once per source.
        replace_token = DEFAULT_IMAGE_PATCH_TOKEN * image_token_len
        if multimodal_cfg['use_im_start_end']:
            replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
        for sentence in source:
            sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, replace_token)
    return sources
|
def preprocess_v1(sources, tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize 'v1'-style conversations and mask non-assistant tokens.

    Renders each source through the default conversation template, tokenizes
    the full prompts, and sets every token the assistant did not produce
    (system prompt, human turns, separators) to IGNORE_INDEX in the labels.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
    conversations = []
    for (i, source) in enumerate(sources):
        if (roles[source[0]['from']] != conv.roles[0]):
            # Drop a leading non-human turn so roles alternate human/gpt.
            source = source[1:]
        conv.messages = []
        for (j, sentence) in enumerate(source):
            role = roles[sentence['from']]
            assert (role == conv.roles[(j % 2)]), f'{i}'
            conv.append_message(role, sentence['value'])
        conversations.append(conv.get_prompt())
    input_ids = tokenizer(conversations, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True).input_ids
    targets = input_ids.clone()
    assert (conv.sep_style == conversation_lib.SeparatorStyle.TWO)
    # Round boundary marker: "<sep><assistant-role>: ".
    sep = ((conv.sep + conv.roles[1]) + ': ')
    for (conversation, target) in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        rounds = conversation.split(conv.sep2)
        # Skip position 0 -- presumably the BOS token; TODO confirm.
        cur_len = 1
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            round_len = len(tokenizer(rou).input_ids)
            # -2 compensates for tokens the tokenizer adds when encoding the
            # instruction fragment in isolation -- mirrors upstream LLaVA;
            # TODO confirm against the tokenizer in use.
            instruction_len = (len(tokenizer(parts[0]).input_ids) - 2)
            # Mask everything up to the assistant's reply in this round.
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            if (cur_len != total_len):
                # Defensive: if bookkeeping drifted, drop the whole sample
                # from the loss rather than train on misaligned labels.
                target[:] = IGNORE_INDEX
                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored)')
    return dict(input_ids=input_ids, labels=targets)
|
def preprocess_mpt(sources, tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize MPT-style conversations and mask non-assistant tokens.

    Same idea as preprocess_v1 but for the MPT separator style: rounds are
    delimited by conv.sep, and the first three chunks (system + first user
    turn) are re-joined into round 0.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
    conversations = []
    for (i, source) in enumerate(sources):
        if (roles[source[0]['from']] != conv.roles[0]):
            # Drop a leading non-human turn so roles alternate human/gpt.
            source = source[1:]
        conv.messages = []
        for (j, sentence) in enumerate(source):
            role = roles[sentence['from']]
            assert (role == conv.roles[(j % 2)]), f'{i}'
            conv.append_message(role, sentence['value'])
        conversations.append(conv.get_prompt())
    input_ids = tokenizer(conversations, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True).input_ids
    targets = input_ids.clone()
    assert (conv.sep_style == conversation_lib.SeparatorStyle.MPT)
    # Round boundary marker: "<sep><assistant-role>".
    sep = (conv.sep + conv.roles[1])
    for (conversation, target) in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        rounds = conversation.split(conv.sep)
        # Re-join the system prompt and first user chunk into round 0, then
        # group the remaining chunks two by two (user, assistant).
        re_rounds = [conv.sep.join(rounds[:3])]
        for conv_idx in range(3, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx:(conv_idx + 2)]))
        cur_len = 0
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(re_rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            # Account for the separator consumed by the split.
            round_len = (len(tokenizer(rou).input_ids) + len(tokenizer(conv.sep).input_ids))
            instruction_len = len(tokenizer(parts[0]).input_ids)
            # Mask everything up to the assistant's reply in this round.
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            if (cur_len != total_len):
                # Defensive: drop misaligned samples from the loss entirely.
                target[:] = IGNORE_INDEX
                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored)')
    return dict(input_ids=input_ids, labels=targets)
|
def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Given a list of sources, each is a conversation list.

    This transform:
    1. Add signal '### ' at the beginning each sentence, with end signal '\\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
    """
    # Template-specific fast paths.
    if conversation_lib.default_conversation.version == 'v1':
        return preprocess_v1(sources, tokenizer)
    if conversation_lib.default_conversation.version == 'mpt':
        return preprocess_mpt(sources, tokenizer)
    # The system header is identical for every source; compute it once up
    # front. (The original read `header` back out of the loop variable below,
    # which would raise NameError for empty `sources`.)
    header = f'''{conversation_lib.default_conversation.system}
'''
    conversations = [_add_speaker_and_signal(header, source) for source in sources]
    conversations_tokenized = _tokenize_fn(conversations, tokenizer)
    input_ids = conversations_tokenized['input_ids']
    # Labels start as a copy of the inputs, then human turns are masked.
    targets = copy.deepcopy(input_ids)
    for (target, source) in zip(targets, sources):
        # Per-segment token lengths (header + each turn) drive the masking.
        tokenized_lens = _tokenize_fn(([header] + [s['value'] for s in source]), tokenizer)['input_ids_lens']
        speakers = [sentence['from'] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)
    return dict(input_ids=input_ids, labels=targets)
|
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Eager variant: reads the whole JSON file and tokenizes every conversation
    up front.

    Args:
        data_path: Path to a JSON list of records, each with a
            'conversations' field.
        tokenizer: Tokenizer used to encode the conversations.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
        super().__init__()
        logging.warning('Loading data...')
        # Fix: close the file handle (was `json.load(open(...))`, which
        # leaked the descriptor).
        with open(data_path, 'r') as f:
            list_data_dict = json.load(f)
        logging.warning('Formatting inputs...')
        sources = [example['conversations'] for example in list_data_dict]
        data_dict = preprocess(sources, tokenizer)
        self.input_ids = data_dict['input_ids']
        self.labels = data_dict['labels']

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        return dict(input_ids=self.input_ids[i], labels=self.labels[i])
|
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Lazy variant: stores the raw JSON records and performs image loading and
    tokenization per item in ``__getitem__``.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, multimodal_cfg: dict):
        super().__init__()
        logging.warning('Loading data...')
        # Fix: close the file handle (was `json.load(open(...))`, which
        # leaked the descriptor).
        with open(data_path, 'r') as f:
            list_data_dict = json.load(f)
        logging.warning('Formatting inputs...Skip in lazy mode')
        self.tokenizer = tokenizer
        self.list_data_dict = list_data_dict
        self.multimodal_cfg = multimodal_cfg

    def __len__(self):
        return len(self.list_data_dict)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        """Load and preprocess the image (if any), then tokenize the turns."""
        sources = self.list_data_dict[i]
        if isinstance(i, int):
            sources = [sources]
        assert (len(sources) == 1), "Don't know why it is wrapped to a list"
        if ('image' in sources[0]):
            image_file = self.list_data_dict[i]['image']
            image_folder = self.multimodal_cfg['image_folder']
            processor = self.multimodal_cfg['image_processor']
            image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
            if (self.multimodal_cfg['image_aspect_ratio'] == 'keep'):
                # Keep aspect ratio but bound the resulting size: the longer
                # side is capped at 448 px, the shorter at 224 px.
                (max_hw, min_hw) = (max(image.size), min(image.size))
                aspect_ratio = (max_hw / min_hw)
                (max_len, min_len) = (448, 224)
                shortest_edge = int(min((max_len / aspect_ratio), min_len))
                image = processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={'shortest_edge': shortest_edge})['pixel_values'][0]
            elif (self.multimodal_cfg['image_aspect_ratio'] == 'pad'):

                def expand2square(pil_img, background_color):
                    # Pad the shorter side (centered) to make the image
                    # square, filling with the processor's mean color.
                    (width, height) = pil_img.size
                    if (width == height):
                        return pil_img
                    elif (width > height):
                        result = Image.new(pil_img.mode, (width, width), background_color)
                        result.paste(pil_img, (0, ((width - height) // 2)))
                        return result
                    else:
                        result = Image.new(pil_img.mode, (height, height), background_color)
                        result.paste(pil_img, (((height - width) // 2), 0))
                        return result
                image = expand2square(image, tuple((int((x * 255)) for x in processor.image_mean)))
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            else:
                # 'square' (default): let the processor resize/crop.
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            # One token per 14x14 ViT patch.
            cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
            sources = preprocess_multimodal(copy.deepcopy([e['conversations'] for e in sources]), self.multimodal_cfg, cur_token_len)
        else:
            sources = copy.deepcopy([e['conversations'] for e in sources])
        data_dict = preprocess(sources, self.tokenizer)
        if isinstance(i, int):
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        if ('image' in self.list_data_dict[i]):
            data_dict['image'] = image
        elif self.multimodal_cfg['is_multimodal']:
            # Text-only sample in a multimodal setup: supply an all-zero
            # dummy image so the collator/model sees a consistent shape.
            crop_size = self.multimodal_cfg['image_processor'].crop_size
            data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
        return data_dict
|
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Pad per-example tensors into a batch for supervised fine-tuning."""

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        # Gather the two token streams, then right-pad each to the batch max.
        input_ids = [example['input_ids'] for example in instances]
        labels = [example['labels'] for example in instances]
        pad_id = self.tokenizer.pad_token_id
        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=pad_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
        batch = {
            'input_ids': input_ids,
            'labels': labels,
            # Real tokens attend; padding positions are masked out.
            'attention_mask': input_ids.ne(pad_id),
        }
        if ('image' in instances[0]):
            images = [example['image'] for example in instances]
            # Stack into one tensor only when every image shares a shape;
            # otherwise hand the model the raw list.
            stackable = all(((img is not None) and (img.shape == images[0].shape)) for img in images)
            batch['images'] = torch.stack(images) if stackable else images
        return batch
|
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Returns a dict with 'train_dataset', 'eval_dataset' (always None) and
    'data_collator', ready to be splatted into the Trainer constructor.
    """
    multimodal_cfg = dict(is_multimodal=data_args.is_multimodal, sep_image_conv_front=data_args.sep_image_conv_front, image_token_len=data_args.image_token_len, image_folder=data_args.image_folder, image_aspect_ratio=data_args.image_aspect_ratio, use_im_start_end=getattr(data_args, 'mm_use_im_start_end', False), image_processor=getattr(data_args, 'image_processor', None))
    if data_args.lazy_preprocess:
        train_dataset = LazySupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path, multimodal_cfg=multimodal_cfg)
    else:
        # SupervisedDataset.__init__ takes no multimodal_cfg; passing it here
        # (as the original did) raised TypeError whenever lazy_preprocess was False.
        train_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path)
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
|
def train():
    """Entry point: parse dataclass CLI args, build the (optionally multimodal)
    model + tokenizer, attach the vision tower, and run the HF Trainer loop."""
    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- model selection: LLaVA-MPT / SPI-LLaVA / plain LLaVA when a vision
    # tower is requested, otherwise a vanilla LLaMA LM ---
    if (model_args.vision_tower is not None):
        if ('mpt' in model_args.model_name_or_path):
            model = LlavaMPTForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
        elif model_args.with_spi:
            from gpt4roi.models.spi_llava import SPILlavaMPTForCausalLM
            model = SPILlavaMPTForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
        else:
            model = LlavaLlamaForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    else:
        model = transformers.LlamaForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    # KV-cache is useless during training and wastes memory.
    model.config.use_cache = False
    if model_args.freeze_backbone:
        model.model.requires_grad_(False)
    # --- tokenizer: MPT checkpoints use the fast tokenizer, LLaMA the slow one ---
    if ('mpt' in model_args.model_name_or_path):
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right')
    else:
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right', use_fast=False)
    if (model_args.version == 'v0'):
        # Legacy v0 recipe: add a real pad token (resizing embeddings) and,
        # for LLaMA, the special eos/bos/unk tokens.
        if (tokenizer.pad_token is None):
            smart_tokenizer_and_embedding_resize(special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN), tokenizer=tokenizer, model=model)
        if ('llama' in model_args.model_name_or_path):
            tokenizer.add_special_tokens({'eos_token': DEFAULT_EOS_TOKEN, 'bos_token': DEFAULT_BOS_TOKEN, 'unk_token': DEFAULT_UNK_TOKEN})
    else:
        # Newer recipe: reuse unk as pad and pick the conversation template.
        tokenizer.pad_token = tokenizer.unk_token
        if ('mpt' in model_args.model_name_or_path):
            conversation_lib.default_conversation = conversation_lib.conv_templates['mpt']
        else:
            conversation_lib.default_conversation = conversation_lib.conv_templates['vicuna_v1_1']
    if (model_args.vision_tower is not None):
        # --- wire up the vision tower and the multimodal projector ---
        model_vision_dict = model.get_model().initialize_vision_modules(vision_tower=model_args.vision_tower, mm_vision_select_layer=model_args.mm_vision_select_layer, pretrain_mm_mlp_adapter=model_args.pretrain_mm_mlp_adapter)
        dtype = torch.float32
        if training_args.fp16:
            dtype = torch.float16
        if training_args.bf16:
            dtype = torch.bfloat16
        model.get_model().vision_tower[0].to(dtype=dtype, device=training_args.device)
        vision_config = model_vision_dict['vision_config']
        data_args.image_token_len = model_vision_dict['image_token_len']
        data_args.image_processor = model_vision_dict['image_processor']
        data_args.is_multimodal = True
        model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
        if model_args.tune_mm_mlp_adapter:
            # Adapter-only training: freeze everything, then re-enable the projector.
            model.requires_grad_(False)
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = True
        model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
        if training_args.freeze_mm_mlp_adapter:
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = False
        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
        vision_config.use_im_start_end = training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.sep_image_conv_front = data_args.sep_image_conv_front
        model.initialize_vision_tokenizer(mm_use_im_start_end=model_args.mm_use_im_start_end, tokenizer=tokenizer, device=training_args.device, tune_mm_mlp_adapter=model_args.tune_mm_mlp_adapter, pretrain_mm_mlp_adapter=model_args.pretrain_mm_mlp_adapter)
        params_no_grad = [n for (n, p) in model.named_parameters() if (not p.requires_grad)]
        if (os.environ.get('SAVE_MEMORY', '0') == '1'):
            # Memory-saving mode: half-precision frozen base, trainable lm_head,
            # SPI module kept in fp32.
            model.requires_grad_(False)
            model.half()
            model.lm_head.requires_grad_(True)
            model.model.spi_module.to(torch.float32)
        if (len(params_no_grad) > 0):
            if ((training_args.fsdp is not None) and (len(training_args.fsdp) > 0)):
                if (len(params_no_grad) < 10):
                    print('[WARNING] Attempting to use FSDP while {} parameters do not require gradients: {}'.format(len(params_no_grad), params_no_grad))
                else:
                    print('[WARNING] Attempting to use FSDP while {} parameters do not require gradients: {}...(omitted)'.format(len(params_no_grad), ', '.join(params_no_grad[:10])))
                print('[WARNING] Attempting to use FSDP with partially frozen paramters, this is experimental.')
                print('[WARNING] As of 4/30/23, this feature requires PyTorch-nightly build. See here for details: https://github.com/haotian-liu/LLaVA#experimental-use-fsdp-to-save-memory-in-pretraining')
                from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
                # Monkey-patch FSDP so use_orig_params defaults to True, which
                # is required when some parameters are frozen.
                def patch_FSDP_use_orig_params(func):
                    def wrap_func(*args, **kwargs):
                        use_orig_params = kwargs.pop('use_orig_params', True)
                        return func(*args, **kwargs, use_orig_params=use_orig_params)
                    return wrap_func
                FSDP.__init__ = patch_FSDP_use_orig_params(FSDP.__init__)
    from gpt4roi.datasets.data_modules import make_multitask_data_module
    data_module = make_multitask_data_module(tokenizer=tokenizer, data_args=data_args)
    if model_args.load_from:
        print(f'load ckpt from {model_args.load_from}')
        model.from_pretrained(model_args.load_from)
    # Env-var switches to restrict training to the SPI module / projector only.
    if os.environ.get('ONLY_SPI', None):
        for (n, p) in model.named_parameters():
            if ('spi_module' not in n):
                p.requires_grad = False
            else:
                p.requires_grad = True
                print(n)
    if os.environ.get('PROJ', None):
        for (n, p) in model.named_parameters():
            if ('mm_projector' in n):
                p.requires_grad = True
                print(n)
    trainer = LLaVATrainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
    print('all trainable parameters')
    for (n, p) in model.named_parameters():
        if p.requires_grad:
            print(n)
    # Resume automatically when the output dir already holds a checkpoint.
    if list(pathlib.Path(training_args.output_dir).glob('checkpoint-*')):
        print('resume', ('---' * 200))
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()
    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
|
class SeparatorStyle(Enum):
    """Different separator styles used by Conversation.get_prompt."""
    # One separator appended after every message ("role: msg" + sep).
    SINGLE = auto()
    # Two alternating separators: sep after even turns, sep2 after odd turns.
    TWO = auto()
    # MPT style: role and message concatenated directly (no ': ').
    MPT = auto()
|
@dataclasses.dataclass
class Conversation():
    """A class that keeps all conversation history.

    Messages are [role, content] pairs; content may also be a
    (text, image, image_process_mode) tuple for multimodal turns.
    """
    system: str
    roles: List[str]
    messages: List[List[str]]
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = '###'
    # NOTE(review): annotated str but defaults to None — effectively Optional[str].
    sep2: str = None
    version: str = 'Unknown'
    skip_next: bool = False
    def get_prompt(self):
        """Serialize system prompt + messages according to sep_style.

        A turn with a falsy message renders as a bare role prefix (the
        generation cue for the model's next reply).
        """
        if (self.sep_style == SeparatorStyle.SINGLE):
            ret = (self.system + self.sep)
            for (role, message) in self.messages:
                if message:
                    if (type(message) is tuple):
                        # Multimodal turn: keep only the text part.
                        (message, _, _) = message
                    ret += (((role + ': ') + message) + self.sep)
                else:
                    ret += (role + ':')
            return ret
        elif (self.sep_style == SeparatorStyle.TWO):
            # Alternate sep/sep2 by turn parity.
            seps = [self.sep, self.sep2]
            ret = (self.system + seps[0])
            for (i, (role, message)) in enumerate(self.messages):
                if message:
                    if (type(message) is tuple):
                        (message, _, _) = message
                    ret += (((role + ': ') + message) + seps[(i % 2)])
                else:
                    ret += (role + ':')
            return ret
        if (self.sep_style == SeparatorStyle.MPT):
            # MPT template: role token abuts the message, no ': '.
            ret = (self.system + self.sep)
            for (role, message) in self.messages:
                if message:
                    if (type(message) is tuple):
                        (message, _, _) = message
                    ret += ((role + message) + self.sep)
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')
    def append_message(self, role, message):
        """Append one [role, message] turn."""
        self.messages.append([role, message])
    def get_images(self, return_pil=False):
        """Collect the images attached to user turns (even indices past offset).

        Returns PIL images when return_pil is True, else base64-encoded JPEGs.
        """
        images = []
        for (i, (role, msg)) in enumerate(self.messages[self.offset:]):
            if ((i % 2) == 0):
                if (type(msg) is tuple):
                    import base64
                    from io import BytesIO
                    from PIL import Image
                    (msg, image, image_process_mode) = msg
                    if (image_process_mode == 'Pad'):
                        def expand2square(pil_img, background_color=(122, 116, 104)):
                            # Pad the shorter side to make a square image.
                            (width, height) = pil_img.size
                            if (width == height):
                                return pil_img
                            elif (width > height):
                                result = Image.new(pil_img.mode, (width, width), background_color)
                                result.paste(pil_img, (0, ((width - height) // 2)))
                                return result
                            else:
                                result = Image.new(pil_img.mode, (height, height), background_color)
                                result.paste(pil_img, (((height - width) // 2), 0))
                                return result
                        image = expand2square(image)
                    elif (image_process_mode == 'Crop'):
                        pass
                    elif (image_process_mode == 'Resize'):
                        image = image.resize((224, 224))
                    else:
                        raise ValueError(f'Invalid image_process_mode: {image_process_mode}')
                    # Downscale, preserving aspect ratio: short side <= min(400, 800/ar, short side).
                    (max_hw, min_hw) = (max(image.size), min(image.size))
                    aspect_ratio = (max_hw / min_hw)
                    (max_len, min_len) = (800, 400)
                    shortest_edge = int(min((max_len / aspect_ratio), min_len, min_hw))
                    longest_edge = int((shortest_edge * aspect_ratio))
                    (W, H) = image.size
                    if (H > W):
                        (H, W) = (longest_edge, shortest_edge)
                    else:
                        (H, W) = (shortest_edge, longest_edge)
                    image = image.resize((W, H))
                    if return_pil:
                        images.append(image)
                    else:
                        buffered = BytesIO()
                        image.save(buffered, format='JPEG')
                        img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                        images.append(img_b64_str)
        return images
    def to_gradio_chatbot(self):
        """Render messages as Gradio chatbot pairs: [user_html, assistant_text].

        User turns (even indices) open a new pair; assistant turns fill the
        second slot of the last pair. Inline images become <img> data URIs.
        """
        ret = []
        for (i, (role, msg)) in enumerate(self.messages[self.offset:]):
            if ((i % 2) == 0):
                if (type(msg) is tuple):
                    import base64
                    from io import BytesIO
                    (msg, image, image_process_mode) = msg
                    (max_hw, min_hw) = (max(image.size), min(image.size))
                    aspect_ratio = (max_hw / min_hw)
                    (max_len, min_len) = (800, 400)
                    shortest_edge = int(min((max_len / aspect_ratio), min_len, min_hw))
                    longest_edge = int((shortest_edge * aspect_ratio))
                    (W, H) = image.size
                    if (H > W):
                        (H, W) = (longest_edge, shortest_edge)
                    else:
                        (H, W) = (shortest_edge, longest_edge)
                    image = image.resize((W, H))
                    buffered = BytesIO()
                    image.save(buffered, format='JPEG')
                    img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                    img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
                    msg = msg.replace('<image>', img_str)
                ret.append([msg, None])
            else:
                ret[(- 1)][(- 1)] = msg
        return ret
    def copy(self):
        """Deep-ish copy (messages list is rebuilt; contents are shared).

        NOTE(review): 'version' and 'skip_next' are not propagated — the copy
        resets them to their defaults.
        """
        return Conversation(system=self.system, roles=self.roles, messages=[[x, y] for (x, y) in self.messages], offset=self.offset, sep_style=self.sep_style, sep=self.sep, sep2=self.sep2)
    def dict(self):
        """JSON-serializable view; multimodal tuples are reduced to their text."""
        if (len(self.get_images()) > 0):
            return {'system': self.system, 'roles': self.roles, 'messages': [[x, (y[0] if (type(y) is tuple) else y)] for (x, y) in self.messages], 'offset': self.offset, 'sep': self.sep, 'sep2': self.sep2}
        return {'system': self.system, 'roles': self.roles, 'messages': self.messages, 'offset': self.offset, 'sep': self.sep, 'sep2': self.sep2}
|
def main(args):
    """Convert Alpaca-style {instruction,input,output} records into the
    two-turn conversation format and write them to args.output_path."""
    data_path = pathlib.Path(args.data_path)
    with data_path.open() as f:
        data = json.load(f)
    (prompt_input, prompt_no_input) = (PROMPT_DICT['prompt_input'], PROMPT_DICT['prompt_no_input'])
    # Pick the with-input or no-input prompt template per example.
    sources = [(prompt_input.format_map(example) if (example.get('input', '') != '') else prompt_no_input.format_map(example)) for example in data]
    targets = [example['output'] for example in data]
    new_data = []
    for (cnt, (s, t)) in enumerate(zip(sources, targets), start=1):
        new_data.append({'id': str(cnt), 'conversations': [{'from': 'human', 'value': s}, {'from': 'gpt', 'value': t}]})
    # Write via a context manager so the output is flushed and closed
    # (the original dumped into an unclosed file handle).
    with open(args.output_path, 'w') as f:
        json.dump(new_data, f, indent=2)
|
def reformat_code(val: str) -> str:
    """Apply the module-level code_lang_pattern -> code_lang_format substitution
    (presumably normalizing code-block language tags — confirm against the
    pattern definition)."""
    rewritten = re.sub(code_lang_pattern, code_lang_format, val)
    return rewritten
|
def html_to_markdown(val: str) -> str:
    """Convert one HTML message to markdown and strip scraper noise.

    Order matters: wrapper tags are removed before markdownify, and the
    regenerate/copy artifacts are removed after.
    """
    text = re.sub(div_pattern, '', val)
    text = re.sub(span_pattern, '', text)
    text = markdownify.markdownify(text).strip()
    text = reformat_code(text)
    # Drop a leading "regenerate" artifact only when it starts the string.
    leading_noise = re.search(regenerate_pattern, text)
    if (leading_noise is not None) and (leading_noise.start() == 0):
        text = text[leading_noise.end():]
    text = re.sub(copy_chars_pattern, '', text)
    text = re.sub(copy_code_pattern, '', text)
    return text.replace('\n\n\n', '\n').strip()
|
def contain_blocked_words(val: str) -> bool:
    """Return True when `val` mentions any blocked keyword (case-insensitive)."""
    lowered = val.lower()
    return any(word in lowered for word in ('openai', 'chatgpt'))
|
def clean_html_one_sample(sample):
    """Normalize one chat sample in place and report an error code.

    Returns (sample, code): 0 ok, 1 too short after trimming, 2 roles do not
    alternate human/gpt, 3 contains blocked words, 4 HTML parser rejected it.
    """
    roles = ['human', 'gpt']
    if (len(sample['conversations']) <= 1):
        return (sample, 1)
    # Drop a leading non-human turn so the dialogue starts with 'human'.
    if (sample['conversations'][0]['from'] != 'human'):
        sample['conversations'] = sample['conversations'][1:]
        if (len(sample['conversations']) <= 1):
            return (sample, 1)
    # Drop a trailing human turn so the dialogue ends with an answer.
    if (sample['conversations'][-1]['from'] == 'human'):
        sample['conversations'] = sample['conversations'][:-1]
        if (len(sample['conversations']) <= 1):
            return (sample, 1)
    for (idx, turn) in enumerate(sample['conversations']):
        if (turn['from'] != roles[idx % 2]):
            return (sample, 2)
        if contain_blocked_words(turn['value']):
            return (sample, 3)
        try:
            turn['value'] = html_to_markdown(turn['value'])
        except (bs4.builder.ParserRejectedMarkup, AssertionError):
            return (sample, 4)
    return (sample, 0)
|
def clean_html_all(content, begin, end):
    """
    Clean the source html files.

    Runs clean_html_one_sample over content[begin:end] in parallel, then
    filters out broken samples and both id- and value-duplicates, printing a
    summary of every skip reason. Returns the surviving samples.
    """
    cnt_skip = 0
    cnt_blocked_words = 0
    cnt_wrong_format = 0
    cnt_parser_error = 0
    cnt_too_short = 0
    cnt_id_duplication = 0
    cnt_value_duplication = 0
    cnt_tag = 0  # NOTE(review): never incremented — appears unused.
    content = content[begin:end]
    processed = []
    # Parallel HTML -> markdown cleanup; order of results follows `content`.
    with ProcessPoolExecutor() as executor:
        for result in tqdm(executor.map(clean_html_one_sample, content), total=len(content)):
            processed.append(result)
    # `visited` intentionally mixes two key types: sample ids (str) and
    # (second-turn value, conversation length) tuples for value-dedup.
    visited = {}
    new_content = []
    for (sample, error_code) in tqdm(processed):
        cid = sample['id']
        skipped = True
        if (error_code != 0):
            if (error_code == 1):
                print(f'id {cid} is too short')
                cnt_too_short += 1
            elif (error_code == 2):
                print(f'id {cid} has a wrong format')
                cnt_wrong_format += 1
            elif (error_code == 3):
                print(f'id {cid} contains blocked words')
                cnt_blocked_words += 1
            elif (error_code == 4):
                print(f'id {cid} contains parser errors')
                cnt_parser_error += 1
            else:
                raise ValueError(f'Invalid error_code: {error_code}')
        elif (cid in visited):
            print(f'id {cid} is an id duplication of {visited[cid]}')
            cnt_id_duplication += 1
        elif ((sample['conversations'][1]['value'], len(sample['conversations'])) in visited):
            key = (sample['conversations'][1]['value'], len(sample['conversations']))
            print(f'id {cid} is a value duplication of {visited[key]}')
            cnt_value_duplication += 1
        else:
            # First occurrence: record under both the id and the value key.
            key = (sample['conversations'][1]['value'], len(sample['conversations']))
            visited[cid] = visited[key] = cid
            skipped = False
        if (not skipped):
            new_content.append(sample)
        else:
            cnt_skip += 1
    print(f'total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, cnt_wrong_format: {cnt_wrong_format}, cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, cnt_value_duplication: {cnt_value_duplication}, ')
    return new_content
|
def main(args):
    """Run the HTML cleaning pipeline; `args` is a dict with
    'in_file', 'out_file', 'begin', 'end' keys."""
    # Context managers close both handles deterministically (the original
    # left them to the garbage collector).
    with open(args['in_file'], 'r') as f:
        content = json.load(f)
    content = clean_html_all(content, args['begin'], args['end'])
    with open(args['out_file'], 'w') as f:
        json.dump(content, f, indent=2)
|
def skip(conv, args):
    """Return True if `conv` should be filtered out.

    Filters on detected language (keep only args.lang, drop args.skip_lang)
    and, with args.reduce_rep, on degenerate digit repetition.
    """
    if (args.lang != 'all') or (args.skip_lang is not None):
        text = '\n'.join([turn['value'] for turn in conv['conversations']])
        try:
            lang_code = Detector(text).language.code
        except (pycld2.error, polyglot.detect.base.UnknownLanguage):
            lang_code = 'unknown'
        if (args.lang != 'all') and (lang_code != args.lang):
            return True
        if lang_code == args.skip_lang:
            return True
    if args.reduce_rep:
        # A digit repeated 9+ times in a row marks repetitive/degenerate output.
        for turn in conv['conversations']:
            if re.search('(\\d)\\1{8}', turn['value']) is not None:
                return True
    return False
|
def split_sample(sample, start_idx, end_idx):
    """Build a sub-sample from conversations[start_idx:end).

    The turn at `end_idx` is included whenever its speaker is not 'human',
    and the new id is the original id suffixed with the start index.
    """
    speaker_at_end = sample['conversations'][end_idx]['from']
    if speaker_at_end != 'human':
        end_idx = end_idx + 1
    return {
        'id': sample['id'] + '_' + str(start_idx),
        'conversations': sample['conversations'][start_idx:end_idx],
    }
|
def split_contents(content, begin, end, tokenizer, max_length):
    """
    Keep the maximum round of conversations within the max token length constraint.

    For each sample in content[begin:end], measures the token length of every
    turn (rendered as BEGIN_SIGNAL + role + ': ' + value + END_SIGNAL) and
    greedily splits the conversation into chunks whose summed length stays
    within max_length. Returns the list of split samples.
    """
    content = content[begin:end]
    new_content = []
    for sample in tqdm.tqdm(content):
        tokenized_lens = []
        for c in sample['conversations']:
            from_str = c['from']
            # Map dataset roles onto the active conversation template's roles.
            if (from_str.lower() == 'human'):
                from_str = conversation_lib.default_conversation.roles[0]
            elif (from_str.lower() == 'gpt'):
                from_str = conversation_lib.default_conversation.roles[1]
            else:
                from_str = 'unknown'
            sentence = ((((BEGIN_SIGNAL + from_str) + ': ') + c['value']) + END_SIGNAL)
            # Count non-pad tokens of the rendered turn.
            length = tokenizer(sentence, return_tensors='pt', padding='longest').input_ids.ne(tokenizer.pad_token_id).sum().item()
            tokenized_lens.append(length)
        num_tokens = 0
        start_idx = 0
        for (idx, l) in enumerate(tokenized_lens):
            if ((num_tokens + l) > max_length):
                # Current chunk would overflow: close it before this turn and
                # start a new chunk whose running total is this turn's length.
                new_content.append(split_sample(sample, start_idx, idx))
                start_idx = idx
                num_tokens = l
            else:
                num_tokens += l
            # Always flush the final (possibly partial) chunk at the last turn.
            if (idx == (len(tokenized_lens) - 1)):
                new_content.append(split_sample(sample, start_idx, idx))
    print(f'total: {len(content)}, new: {len(new_content)}')
    return new_content
|
def main(args):
    """Split long conversations in args.in_file so every chunk fits within
    args.max_length tokens, writing the result to args.out_file."""
    # Context managers close both handles deterministically (the original
    # left them to the garbage collector).
    with open(args.in_file, 'r') as f:
        content = json.load(f)
    tokenizer = transformers.AutoTokenizer.from_pretrained(args.model_name_or_path, model_max_length=args.max_length, padding_side='right', use_fast=False)
    if (tokenizer.pad_token is None):
        tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN))
    content = split_contents(content, args.begin, args.end, tokenizer, args.max_length)
    with open(args.out_file, 'w') as f:
        json.dump(content, f, indent=2)
|
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    """Ask GPT-4 to grade an answer, retrying forever until a response arrives.

    Args:
        content: the user prompt holding the material to review.
        max_tokens: completion token budget.
    Returns:
        The assistant message text.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(model='gpt-4', messages=[{'role': 'system', 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'}, {'role': 'user', 'content': content}], temperature=0.2, max_tokens=max_tokens)
            break
        except openai.error.RateLimitError:
            # Back off instead of hammering the API: the original retried
            # rate-limited calls immediately in a tight loop.
            time.sleep(1)
        except Exception as e:
            print(e)
            time.sleep(1)
    print('success!')
    return response['choices'][0]['message']['content']
|
def parse_score(review):
    """Parse two numeric scores from the first line of a GPT review.

    Returns [score1, score2] as floats, or [-1, -1] when parsing fails.
    """
    try:
        first_line = review.split('\n')[0]
        tokens = first_line.replace(',', ' ').split(' ')
        if len(tokens) == 2:
            return [float(tokens[0]), float(tokens[1])]
        print('error', review)
        return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
|
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    """Ask GPT-4 to grade an answer, retrying forever until a response arrives.

    Args:
        content: the user prompt holding the material to review.
        max_tokens: completion token budget.
    Returns:
        The assistant message text.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(model='gpt-4', messages=[{'role': 'system', 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'}, {'role': 'user', 'content': content}], temperature=0.2, max_tokens=max_tokens)
            break
        except openai.error.RateLimitError:
            # Back off instead of hammering the API: the original retried
            # rate-limited calls immediately in a tight loop.
            time.sleep(1)
        except Exception as e:
            print(e)
            time.sleep(1)
    print('success!')
    return response['choices'][0]['message']['content']
|
def parse_score(review):
    """Parse two numeric scores from the first line of a GPT review.

    Returns [score1, score2] as floats, or [-1, -1] when parsing fails.
    """
    try:
        first_line = review.split('\n')[0]
        tokens = first_line.replace(',', ' ').split(' ')
        if len(tokens) == 2:
            return [float(tokens[0]), float(tokens[1])]
        print('error', review)
        return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
|
def get_args():
    """Parse command-line arguments for this evaluation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--result-file', type=str)
    parser.add_argument('--output-file', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # NOTE(review): type=list makes argparse call list() on the raw string,
    # splitting it into characters (e.g. "ABC" -> ['A','B','C']). That only
    # works for single-character options; nargs='+' would be the usual fix,
    # but it changes the CLI — confirm no caller relies on the current form.
    parser.add_argument('--options', type=list, default=['A', 'B', 'C', 'D', 'E'])
    return parser.parse_args()
|
def convert_caps(results):
    """Reshape QA-style results into COCO-caption-style records."""
    return [
        {'image_id': int(item['question_id']), 'caption': item['text']}
        for item in results
    ]
|
def get_pred_idx(prediction, choices, options):
    """Get the index (e.g. 2) from the prediction (e.g. 'C').

    Falls back to a uniformly random valid index when the prediction is not
    one of the first len(choices) option letters.
    """
    valid_letters = options[:len(choices)]
    if prediction not in valid_letters:
        return random.choice(range(len(choices)))
    return options.index(prediction)
|
def get_args():
    """Parse command-line arguments for this evaluation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # NOTE(review): type=list splits the raw string into characters
    # (e.g. "ABC" -> ['A','B','C']); only usable for single-char options.
    parser.add_argument('--options', type=list, default=['A', 'B', 'C', 'D', 'E'])
    return parser.parse_args()
|
def convert_caps(results):
    """Reshape QA-style results into COCO-caption-style records."""
    return [
        {'image_id': int(item['question_id']), 'caption': item['text']}
        for item in results
    ]
|
def get_pred_idx(prediction, choices, options):
    """Get the index (e.g. 2) from the prediction (e.g. 'C').

    Falls back to a uniformly random valid index when the prediction is not
    one of the first len(choices) option letters.
    """
    valid_letters = options[:len(choices)]
    if prediction not in valid_letters:
        return random.choice(range(len(choices)))
    return options.index(prediction)
|
def get_args():
    """Parse command-line arguments for this evaluation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--requery-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # NOTE(review): type=list splits the raw string into characters
    # (e.g. "ABC" -> ['A','B','C']); only usable for single-char options.
    parser.add_argument('--options', type=list, default=['A', 'B', 'C', 'D', 'E'])
    return parser.parse_args()
|
def convert_caps(results):
    """Reshape QA-style results into COCO-caption-style records."""
    return [
        {'image_id': int(item['question_id']), 'caption': item['text']}
        for item in results
    ]
|
def get_pred_idx(prediction, choices, options):
    """Get the index (e.g. 2) from the prediction (e.g. 'C').

    Falls back to a uniformly random valid index when the prediction is not
    one of the first len(choices) option letters.
    """
    valid_letters = options[:len(choices)]
    if prediction not in valid_letters:
        return random.choice(range(len(choices)))
    return options.index(prediction)
|
def read_jsonl(path: str, key: str=None):
    """Read a JSON-lines file.

    Args:
        path: file path; '~' is expanded.
        key: when given, sort records by this field and return a dict keyed by it.
    Returns:
        A list of records, or a {record[key]: record} dict when `key` is set.

    Robustness fix: blank lines are skipped. The original tested `if not line`,
    which is never true for a line read from a file (it always carries '\n'),
    so a blank line crashed json.loads.
    """
    data = []
    with open(os.path.expanduser(path)) as f:
        for line in f:
            if not line.strip():
                continue
            data.append(json.loads(line))
    if (key is not None):
        data.sort(key=(lambda x: x[key]))
        data = {item[key]: item for item in data}
    return data
|
def trim_hanging_lines(s: str, n: int) -> str:
    """Strip `s`, then drop its first `n` lines, re-stripping after each drop.

    Raises IndexError when fewer than `n` newline-separated lines remain
    (same as the original split-based behavior).
    """
    remainder = s.strip()
    for _ in range(n):
        remainder = remainder.split('\n', 1)[1].strip()
    return remainder
|
def get_answer(question_id: int, question: str, max_tokens: int):
    """Query the OpenAI chat model once, retrying up to 3 times.

    Returns an answer record; on persistent failure its 'text' field is the
    '#ERROR#' marker left by the last failed attempt.
    """
    ans = {'answer_id': shortuuid.uuid(), 'question_id': question_id, 'model_id': MODEL_ID}
    for _ in range(3):
        try:
            response = openai.ChatCompletion.create(model=MODEL, messages=[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': question}], max_tokens=max_tokens)
            ans['text'] = response['choices'][0]['message']['content']
            return ans
        except Exception as err:
            print('[ERROR]', err)
            ans['text'] = '#ERROR#'
            time.sleep(1)
    return ans
|
def consolidate_ckpt(src_path, dst_path):
    """Re-save a checkpoint (model + tokenizer) from src_path into dst_path.

    The model is loaded in fp16 with low_cpu_mem_usage after an in-place
    config upgrade of the source checkpoint.
    """
    print('Loading model')
    auto_upgrade(src_path)
    loaded_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    loaded_tokenizer = AutoTokenizer.from_pretrained(src_path)
    loaded_model.save_pretrained(dst_path)
    loaded_tokenizer.save_pretrained(dst_path)
|
def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
    """Adds sentinel tokens and padding token (if missing).

    Expands the tokenizer vocabulary to include the mixture-of-denoisers
    sentinel tokens plus a padding token. Everything is added as a special
    token; nothing is added when the tokens already exist. Finally caches the
    sentinel token ids on `tokenizer.sentinel_token_ids`.
    """
    sentinel_tokens = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinel_tokens, special_tokens=True)
    if tokenizer.pad_token is None:
        tokenizer.add_tokens('<pad>', special_tokens=True)
        tokenizer.pad_token = '<pad>'
        assert (tokenizer.pad_token_id is not None)
    # Tokenize all sentinels in one pass and remember their ids.
    joined_sentinels = ''.join(sentinel_tokens)
    tokenizer.sentinel_token_ids = tokenizer(joined_sentinels, add_special_tokens=False).input_ids
|
class AutoTokenizerForMOD(AutoTokenizer):
    """AutoTokenizer + Adaptation for MOD.

    A simple wrapper around AutoTokenizer to make instantiating
    an MOD-adapted tokenizer a bit easier.

    MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
    a padding token, and a property to get the token ids of the
    sentinel tokens.
    """
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """See `AutoTokenizer.from_pretrained` docstring.

        Loads the tokenizer normally, then runs
        `adapt_tokenizer_for_denoising` on it before returning.
        """
        tokenizer = super().from_pretrained(*args, **kwargs)
        adapt_tokenizer_for_denoising(tokenizer)
        return tokenizer
|
class MPTMLP(nn.Module):
    """Feed-forward block: Linear up-projection -> GELU -> Linear down-projection."""

    def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
        super().__init__()
        hidden_dim = expansion_ratio * d_model
        self.up_proj = nn.Linear(d_model, hidden_dim, device=device)
        self.act = nn.GELU(approximate='none')
        self.down_proj = nn.Linear(hidden_dim, d_model, device=device)
        # Marker flag consumed elsewhere in the model (e.g. init/residual scaling).
        self.down_proj._is_residual = True

    def forward(self, x):
        hidden = self.act(self.up_proj(x))
        return self.down_proj(hidden)
|
class MPTBlock(nn.Module):
    """One MPT transformer block: pre-norm attention and pre-norm MLP,
    each followed by residual dropout and a skip connection."""

    def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Optional[Dict]=None, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', device: Optional[str]=None, **kwargs):
        """Build the block.

        Args:
            d_model: embedding width.
            n_heads: number of attention heads.
            expansion_ratio: MLP up-projection factor.
            attn_config: attention settings; None selects the standard defaults.
                (The original used a mutable dict as the default argument,
                shared across all calls — replaced with a per-call default.)
            resid_pdrop: dropout applied on both residual branches.
            norm_type: key into NORM_CLASS_REGISTRY (lower-cased).
            device: target device for parameter construction.
        """
        del kwargs
        super().__init__()
        if attn_config is None:
            attn_config = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
        norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
        attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
        self.norm_1 = norm_class(d_model, device=device)
        self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, device=device)
        self.norm_2 = norm_class(d_model, device=device)
        self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
        self.resid_attn_dropout = nn.Dropout(resid_pdrop)
        self.resid_ffn_dropout = nn.Dropout(resid_pdrop)

    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
        """Pre-norm attention + MLP with residual connections; returns the
        transformed hidden states and the (possibly updated) KV cache."""
        a = self.norm_1(x)
        (b, _, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
        x = (x + self.resid_attn_dropout(b))
        m = self.norm_2(x)
        n = self.ffn(m)
        x = (x + self.resid_ffn_dropout(n))
        return (x, past_key_value)
|
class MPTConfig(PretrainedConfig):
    # Registers this config under the 'mpt' model type for the transformers
    # auto-class machinery.
    model_type = 'mpt'
    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[(float, str)]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):
        "The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. 
Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. 
Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n "
        # Store every hyperparameter as an attribute; see the docstring above
        # for the meaning of each field.
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.expansion_ratio = expansion_ratio
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.learned_pos_emb = learned_pos_emb
        self.attn_config = attn_config
        self.init_device = init_device
        self.logit_scale = logit_scale
        self.no_bias = no_bias
        self.verbose = verbose
        self.embedding_fraction = embedding_fraction
        self.norm_type = norm_type
        self.use_cache = use_cache
        self.init_config = init_config
        # 'name' and 'loss_fn' are keys that may appear in serialized configs
        # (presumably from the training harness — TODO confirm); drop them
        # before delegating to PretrainedConfig, which would otherwise store
        # them as attributes.
        if ('name' in kwargs):
            del kwargs['name']
        if ('loss_fn' in kwargs):
            del kwargs['loss_fn']
        super().__init__(**kwargs)
        # Fail fast on inconsistent hyperparameter combinations.
        self._validate_config()
    def _set_config_defaults(self, config, config_defaults):
        # Fill in any keys missing from `config` using `config_defaults`.
        # NOTE(review): mutates the passed-in dict in place and returns it, so
        # a caller-provided dict is modified.
        for (k, v) in config_defaults.items():
            if (k not in config):
                config[k] = v
        return config
    def _validate_config(self):
        # Backfill the attention and init sub-configs with module-level
        # defaults, then check all invariants, raising on violation.
        self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)
        self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)
        # Heads must evenly partition the embedding dimension.
        if ((self.d_model % self.n_heads) != 0):
            raise ValueError('d_model must be divisible by n_heads')
        # All dropout values are probabilities in [0, 1].
        if any((((prob < 0) or (prob > 1)) for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):
            raise ValueError("self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1")
        if (self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']):
            raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
        # The following features are only available in the torch/triton
        # attention implementations, not in flash attention.
        if (self.attn_config['prefix_lm'] and (self.attn_config['attn_impl'] not in ['torch', 'triton'])):
            raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
        if (self.attn_config['alibi'] and (self.attn_config['attn_impl'] not in ['torch', 'triton'])):
            raise NotImplementedError('alibi only implemented with torch and triton attention.')
        if (self.attn_config['attn_uses_sequence_id'] and (self.attn_config['attn_impl'] not in ['torch', 'triton'])):
            raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')
        # Embedding-gradient scaling must lie in (0, 1].
        if ((self.embedding_fraction > 1) or (self.embedding_fraction <= 0)):
            raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
        # logit_scale may be numeric or the literal string 'inv_sqrt_d_model'.
        if (isinstance(self.logit_scale, str) and (self.logit_scale != 'inv_sqrt_d_model')):
            raise ValueError(f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
        if (self.init_config.get('name', None) is None):
            raise ValueError(f"self.init_config={self.init_config!r} 'name' needs to be set.")
        # The model needs at least one source of positional information.
        if ((not self.learned_pos_emb) and (not self.attn_config['alibi'])):
            raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')
|
@contextmanager
def init_empty_weights(include_buffers: bool=False):
    """Meta-device initialization context manager.

    While active, model parameters are created on the ``meta`` device, so an
    "empty" model can be built without allocating real weight memory — useful
    when plain initialization would exhaust available RAM.

    Args:
        include_buffers (bool, optional): If True, buffers are also placed on
            the meta device during initialization. Defaults to False.

    Note:
        A model created under this context manager has no real weights, so
        operations such as ``model.to(device)`` will not work until weights
        are loaded into it.
    """
    # Delegate to the generic device-initialization manager with 'meta'.
    with init_on_device(torch.device('meta'), include_buffers=include_buffers) as ctx:
        yield ctx
|
@contextmanager
def init_on_device(device: torch.device, include_buffers: bool=False):
    'Device initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the specified device.\n\n Args:\n device (`torch.device`): Device to initialize all parameters on.\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n with init_on_device(device=torch.device("cuda")):\n tst = nn.Liner(100, 100) # on `cuda` device\n ```\n '
    # Save the original hooks so they can be restored in the finally block.
    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer
    def register_empty_parameter(module, name, param):
        # Register normally first, then move the stored parameter to the
        # target device, preserving the parameter subclass and its __dict__.
        old_register_parameter(module, name, param)
        if (param is not None):
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
    def register_empty_buffer(module, name, buffer):
        # Same idea for buffers: register, then relocate to the target device.
        old_register_buffer(module, name, buffer)
        if (buffer is not None):
            module._buffers[name] = module._buffers[name].to(device)
    # Optionally patch bare torch constructors so tensors created outside
    # register_* calls also land on the target device.
    if include_buffers:
        tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
    else:
        tensor_constructors_to_patch = {}
    def patch_tensor_constructor(fn):
        # Wrap a torch constructor so it always forces device=<target>.
        def wrapper(*args, **kwargs):
            kwargs['device'] = device
            return fn(*args, **kwargs)
        return wrapper
    try:
        # Install the patched hooks for the duration of the context.
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
        (yield)
    finally:
        # Always restore the original hooks, even if the body raised.
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)
|
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if (tensor.device.type == 'cuda'):
dtype = torch.get_autocast_gpu_dtype()
elif (tensor.device.type == 'cpu'):
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
|
class LPLayerNorm(torch.nn.LayerNorm):
    """Low-precision LayerNorm.

    Under autocast, the input and the affine parameters are downcast to the
    active autocast dtype and the normalization itself runs with autocast
    disabled, so the whole op executes in the lower precision.
    """

    def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
        super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)

    def forward(self, x):
        # Downcast the input and (optional) affine parameters to the autocast
        # dtype; each is a no-op when autocast is disabled.
        x_dc = _cast_if_autocast_enabled(x)
        weight_dc = self.weight if self.weight is None else _cast_if_autocast_enabled(self.weight)
        bias_dc = self.bias if self.bias is None else _cast_if_autocast_enabled(self.bias)
        # Disable autocast so layer_norm runs in the already-chosen dtype.
        with torch.autocast(enabled=False, device_type=x.device.type):
            return torch.nn.functional.layer_norm(x_dc, self.normalized_shape, weight_dc, bias_dc, self.eps)
|
def rms_norm(x, weight=None, eps=1e-05):
    """Root-mean-square normalization.

    Normalizes `x` along its last dimension by the RMS of its elements:
    ``x * rsqrt(mean(x**2, dim=-1) + eps)``, optionally scaled elementwise by
    `weight`.

    Args:
        x (torch.Tensor): Input tensor.
        weight (Optional[torch.Tensor]): Per-feature scale applied after
            normalization; skipped when None.
        eps (float): Small constant added to the mean of squares for
            numerical stability.

    Returns:
        torch.Tensor: The normalized (and optionally scaled) tensor.
    """
    # Bug fix: the normalization must MULTIPLY by rsqrt (i.e. divide by the
    # RMS). The previous `x / torch.rsqrt(...)` multiplied by the RMS,
    # amplifying large activations instead of normalizing them.
    output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    if weight is not None:
        return output * weight
    return output
|
class RMSNorm(torch.nn.Module):
    """RMSNorm layer with an optional learnable per-feature scale."""

    def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
        super().__init__()
        self.eps = eps
        if not weight:
            # No learnable scale; register None so state_dict stays consistent.
            self.register_parameter('weight', None)
        else:
            self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))

    def forward(self, x):
        # Normalize in float32 for numerical stability, then restore the
        # input dtype.
        return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
|
class LPRMSNorm(RMSNorm):
    """Low-precision RMSNorm.

    Under autocast, the input and weight are downcast to the active autocast
    dtype and `rms_norm` runs with autocast disabled.
    """

    def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
        super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)

    def forward(self, x):
        # Downcast input/weight (no-ops when autocast is disabled), then
        # evaluate the normalization outside autocast and cast the result
        # back to the input dtype.
        x_dc = _cast_if_autocast_enabled(x)
        weight_dc = self.weight if self.weight is None else _cast_if_autocast_enabled(self.weight)
        with torch.autocast(enabled=False, device_type=x.device.type):
            return rms_norm(x_dc, weight_dc, self.eps).to(dtype=x.dtype)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.