code
stringlengths
17
6.64M
def preprocess_mpt(sources, tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize MPT-template conversations and build loss-masked targets.

    Renders each conversation with the default (MPT) template, tokenizes the
    batch, then copies the ids as labels and overwrites every non-assistant
    span with IGNORE_INDEX so the loss is computed only on GPT replies.

    Args:
        sources: list of conversations; each is a list of
            {'from': 'human'|'gpt', 'value': str} turns.
        tokenizer: tokenizer whose pad token / max length drive padding
            and truncation.

    Returns:
        dict with 'input_ids' and 'labels' tensors of identical shape.
    """
    conv = conversation_lib.default_conversation.copy()
    roles = {'human': conv.roles[0], 'gpt': conv.roles[1]}
    conversations = []
    for (i, source) in enumerate(sources):
        if (roles[source[0]['from']] != conv.roles[0]):
            # Drop a leading non-human turn so turns alternate human/gpt.
            source = source[1:]
        conv.messages = []
        for (j, sentence) in enumerate(source):
            role = roles[sentence['from']]
            # Turns must strictly alternate starting with the human role.
            assert (role == conv.roles[(j % 2)]), f'{i}'
            conv.append_message(role, sentence['value'])
        conversations.append(conv.get_prompt())
    input_ids = tokenizer(conversations, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True).input_ids
    targets = input_ids.clone()
    assert (conv.sep_style == conversation_lib.SeparatorStyle.MPT)
    # Marker that starts an assistant turn: separator + assistant role string.
    sep = (conv.sep + conv.roles[1])
    for (conversation, target) in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        rounds = conversation.split(conv.sep)
        # First "round" bundles system prompt + first user turn + first
        # assistant header (3 chunks); later rounds are (user, assistant) pairs.
        re_rounds = [conv.sep.join(rounds[:3])]
        for conv_idx in range(3, len(rounds), 2):
            re_rounds.append(conv.sep.join(rounds[conv_idx:(conv_idx + 2)]))
        cur_len = 0
        target[:cur_len] = IGNORE_INDEX
        for (i, rou) in enumerate(re_rounds):
            if (rou == ''):
                break
            parts = rou.split(sep)
            if (len(parts) != 2):
                break
            parts[0] += sep
            # Token count of the full round (+ trailing separator) and of the
            # instruction prefix; only the reply span keeps real labels.
            round_len = (len(tokenizer(rou).input_ids) + len(tokenizer(conv.sep).input_ids))
            instruction_len = len(tokenizer(parts[0]).input_ids)
            target[cur_len:(cur_len + instruction_len)] = IGNORE_INDEX
            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX
        if (cur_len < tokenizer.model_max_length):
            if (cur_len != total_len):
                # Length bookkeeping disagreed with actual tokenization:
                # mask the whole example rather than train on shifted labels.
                target[:] = IGNORE_INDEX
                print(f'WARNING: tokenization mismatch: {cur_len} vs. {total_len}. (ignored)')
    return dict(input_ids=input_ids, labels=targets)
def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize a batch of conversations and build loss-masked labels.

    Dispatches to the version-specific preprocessors when the default
    conversation template is 'v1' or 'mpt'. Otherwise:
      1. prefix each sentence with the '### ' speaker signal (newline-ended);
      2. concatenate each conversation into one string;
      3. tokenize the concatenations;
      4. deep-copy the ids as labels and mask human tokens with IGNORE_INDEX.
    """
    version = conversation_lib.default_conversation.version
    if version == 'v1':
        return preprocess_v1(sources, tokenizer)
    if version == 'mpt':
        return preprocess_mpt(sources, tokenizer)
    # System prompt followed by a blank line, shared by every conversation.
    header = f'{conversation_lib.default_conversation.system}\n\n'
    conversations = [_add_speaker_and_signal(header, source) for source in sources]
    tokenized = _tokenize_fn(conversations, tokenizer)
    input_ids = tokenized['input_ids']
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        turn_lens = _tokenize_fn([header] + [turn['value'] for turn in source], tokenizer)['input_ids_lens']
        speakers = [turn['from'] for turn in source]
        _mask_targets(target, turn_lens, speakers)
    return dict(input_ids=input_ids, labels=targets)
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Loads the JSON file and tokenizes every conversation eagerly at
    construction time; __getitem__ is then a cheap index lookup.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
        super(SupervisedDataset, self).__init__()
        logging.warning('Loading data...')
        # Use a context manager so the file handle is closed (the original
        # json.load(open(...)) leaked it).
        with open(data_path, 'r') as f:
            list_data_dict = json.load(f)
        logging.warning('Formatting inputs...')
        sources = [example['conversations'] for example in list_data_dict]
        data_dict = preprocess(sources, tokenizer)
        self.input_ids = data_dict['input_ids']
        self.labels = data_dict['labels']

    def __len__(self):
        # One tokenized sequence per conversation.
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        return dict(input_ids=self.input_ids[i], labels=self.labels[i])
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning.

    Unlike SupervisedDataset, tokenization (and image loading) is deferred
    to __getitem__, so construction only reads the JSON index.
    """

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, multimodal_cfg: dict):
        super(LazySupervisedDataset, self).__init__()
        logging.warning('Loading data...')
        list_data_dict = json.load(open(data_path, 'r'))
        logging.warning('Formatting inputs...Skip in lazy mode')
        self.tokenizer = tokenizer
        self.list_data_dict = list_data_dict
        # Dict with keys such as image_folder, image_processor,
        # image_aspect_ratio, is_multimodal (see make_supervised_data_module).
        self.multimodal_cfg = multimodal_cfg

    def __len__(self):
        return len(self.list_data_dict)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        sources = self.list_data_dict[i]
        if isinstance(i, int):
            # Wrap a single record so the code below can treat it as a batch.
            sources = [sources]
        assert (len(sources) == 1), "Don't know why it is wrapped to a list"
        if ('image' in sources[0]):
            image_file = self.list_data_dict[i]['image']
            image_folder = self.multimodal_cfg['image_folder']
            processor = self.multimodal_cfg['image_processor']
            image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
            if (self.multimodal_cfg['image_aspect_ratio'] == 'keep'):
                # Keep aspect ratio: scale so the shortest edge lands in
                # [.., 224] with the longest edge capped near 448.
                (max_hw, min_hw) = (max(image.size), min(image.size))
                aspect_ratio = (max_hw / min_hw)
                (max_len, min_len) = (448, 224)
                shortest_edge = int(min((max_len / aspect_ratio), min_len))
                image = processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={'shortest_edge': shortest_edge})['pixel_values'][0]
            elif (self.multimodal_cfg['image_aspect_ratio'] == 'pad'):

                def expand2square(pil_img, background_color):
                    # Pad the shorter dimension with the processor's mean
                    # color so the image becomes square without distortion.
                    (width, height) = pil_img.size
                    if (width == height):
                        return pil_img
                    elif (width > height):
                        result = Image.new(pil_img.mode, (width, width), background_color)
                        result.paste(pil_img, (0, ((width - height) // 2)))
                        return result
                    else:
                        result = Image.new(pil_img.mode, (height, height), background_color)
                        result.paste(pil_img, (((height - width) // 2), 0))
                        return result
                image = expand2square(image, tuple((int((x * 255)) for x in processor.image_mean)))
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            else:
                image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            # Number of vision tokens, assuming a 14x14 patch grid
            # (CLIP ViT-L/14 patch size).
            cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
            sources = preprocess_multimodal(copy.deepcopy([e['conversations'] for e in sources]), self.multimodal_cfg, cur_token_len)
        else:
            sources = copy.deepcopy([e['conversations'] for e in sources])
        data_dict = preprocess(sources, self.tokenizer)
        if isinstance(i, int):
            # Unwrap the singleton batch back to a single example.
            data_dict = dict(input_ids=data_dict['input_ids'][0], labels=data_dict['labels'][0])
        if ('image' in self.list_data_dict[i]):
            data_dict['image'] = image
        elif self.multimodal_cfg['is_multimodal']:
            # Text-only example in a multimodal run: supply an all-zero
            # image so batch collation stays uniform.
            crop_size = self.multimodal_cfg['image_processor'].crop_size
            data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
        return data_dict
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[(str, torch.Tensor)]:
        # Gather per-example tensors, then right-pad to the batch maximum.
        ids = [example['input_ids'] for example in instances]
        lbls = [example['labels'] for example in instances]
        pad_id = self.tokenizer.pad_token_id
        input_ids = torch.nn.utils.rnn.pad_sequence(ids, batch_first=True, padding_value=pad_id)
        labels = torch.nn.utils.rnn.pad_sequence(lbls, batch_first=True, padding_value=IGNORE_INDEX)
        batch = dict(input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(pad_id))
        if 'image' in instances[0]:
            images = [example['image'] for example in instances]
            # Stack only when every image tensor exists and shares one shape;
            # otherwise hand back the ragged list untouched.
            uniform = all(img is not None and img.shape == images[0].shape for img in images)
            batch['images'] = torch.stack(images) if uniform else images
        return batch
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Returns a dict with 'train_dataset', 'eval_dataset' (always None here),
    and 'data_collator', suitable for splatting into a Trainer.
    """
    multimodal_cfg = dict(
        is_multimodal=data_args.is_multimodal,
        sep_image_conv_front=data_args.sep_image_conv_front,
        image_token_len=data_args.image_token_len,
        image_folder=data_args.image_folder,
        image_aspect_ratio=data_args.image_aspect_ratio,
        use_im_start_end=getattr(data_args, 'mm_use_im_start_end', False),
        image_processor=getattr(data_args, 'image_processor', None),
    )
    if data_args.lazy_preprocess:
        train_dataset = LazySupervisedDataset(
            tokenizer=tokenizer,
            data_path=data_args.data_path,
            multimodal_cfg=multimodal_cfg,
        )
    else:
        # BUG FIX: SupervisedDataset.__init__ takes no multimodal_cfg, so the
        # original unconditional kwargs crashed with lazy_preprocess=False.
        train_dataset = SupervisedDataset(
            tokenizer=tokenizer,
            data_path=data_args.data_path,
        )
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
def train():
    """End-to-end fine-tuning entry point.

    Builds model + tokenizer from CLI dataclass args, optionally wires in the
    vision tower and multimodal projector, configures which parameters are
    trainable, then runs (or resumes) the trainer and saves the result.
    """
    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Pick the causal-LM class: multimodal variants when a vision tower is
    # configured, otherwise plain LLaMA.
    if (model_args.vision_tower is not None):
        if ('mpt' in model_args.model_name_or_path):
            model = LlavaMPTForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
        elif model_args.with_spi:
            from gpt4roi.models.spi_llava import SPILlavaMPTForCausalLM
            model = SPILlavaMPTForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
        else:
            model = LlavaLlamaForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    else:
        model = transformers.LlamaForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    # KV-cache is for generation only; disable during training.
    model.config.use_cache = False
    if model_args.freeze_backbone:
        model.model.requires_grad_(False)
    # MPT tokenizers are fast-only; others use the slow tokenizer.
    if ('mpt' in model_args.model_name_or_path):
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right')
    else:
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right', use_fast=False)
    if (model_args.version == 'v0'):
        # v0: add a pad token (resizing embeddings) and, for llama, the
        # standard special tokens.
        if (tokenizer.pad_token is None):
            smart_tokenizer_and_embedding_resize(special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN), tokenizer=tokenizer, model=model)
        if ('llama' in model_args.model_name_or_path):
            tokenizer.add_special_tokens({'eos_token': DEFAULT_EOS_TOKEN, 'bos_token': DEFAULT_BOS_TOKEN, 'unk_token': DEFAULT_UNK_TOKEN})
    else:
        # Later versions reuse unk as pad and select the chat template.
        tokenizer.pad_token = tokenizer.unk_token
        if ('mpt' in model_args.model_name_or_path):
            conversation_lib.default_conversation = conversation_lib.conv_templates['mpt']
        else:
            conversation_lib.default_conversation = conversation_lib.conv_templates['vicuna_v1_1']
    if (model_args.vision_tower is not None):
        # Attach the CLIP vision tower + projector and propagate its
        # configuration into data_args for the dataset pipeline.
        model_vision_dict = model.get_model().initialize_vision_modules(vision_tower=model_args.vision_tower, mm_vision_select_layer=model_args.mm_vision_select_layer, pretrain_mm_mlp_adapter=model_args.pretrain_mm_mlp_adapter)
        dtype = torch.float32
        if training_args.fp16:
            dtype = torch.float16
        if training_args.bf16:
            dtype = torch.bfloat16
        model.get_model().vision_tower[0].to(dtype=dtype, device=training_args.device)
        vision_config = model_vision_dict['vision_config']
        data_args.image_token_len = model_vision_dict['image_token_len']
        data_args.image_processor = model_vision_dict['image_processor']
        data_args.is_multimodal = True
        model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
        if model_args.tune_mm_mlp_adapter:
            # Adapter-only training: freeze everything, then re-enable the
            # projector parameters.
            model.requires_grad_(False)
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = True
        model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
        if training_args.freeze_mm_mlp_adapter:
            for p in model.get_model().mm_projector.parameters():
                p.requires_grad = False
        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end
        vision_config.use_im_start_end = training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.sep_image_conv_front = data_args.sep_image_conv_front
        model.initialize_vision_tokenizer(mm_use_im_start_end=model_args.mm_use_im_start_end, tokenizer=tokenizer, device=training_args.device, tune_mm_mlp_adapter=model_args.tune_mm_mlp_adapter, pretrain_mm_mlp_adapter=model_args.pretrain_mm_mlp_adapter)
        params_no_grad = [n for (n, p) in model.named_parameters() if (not p.requires_grad)]
        if (os.environ.get('SAVE_MEMORY', '0') == '1'):
            # Memory-saving mode: half-precision frozen backbone, trainable
            # lm_head, SPI module kept in fp32.
            model.requires_grad_(False)
            model.half()
            model.lm_head.requires_grad_(True)
            model.model.spi_module.to(torch.float32)
        if (len(params_no_grad) > 0):
            if ((training_args.fsdp is not None) and (len(training_args.fsdp) > 0)):
                if (len(params_no_grad) < 10):
                    print('[WARNING] Attempting to use FSDP while {} parameters do not require gradients: {}'.format(len(params_no_grad), params_no_grad))
                else:
                    print('[WARNING] Attempting to use FSDP while {} parameters do not require gradients: {}...(omitted)'.format(len(params_no_grad), ', '.join(params_no_grad[:10])))
                print('[WARNING] Attempting to use FSDP with partially frozen paramters, this is experimental.')
                print('[WARNING] As of 4/30/23, this feature requires PyTorch-nightly build. See here for details: https://github.com/haotian-liu/LLaVA#experimental-use-fsdp-to-save-memory-in-pretraining')
                from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP

                def patch_FSDP_use_orig_params(func):
                    # Force use_orig_params=True so FSDP tolerates the
                    # partially frozen parameter set.
                    def wrap_func(*args, **kwargs):
                        use_orig_params = kwargs.pop('use_orig_params', True)
                        return func(*args, **kwargs, use_orig_params=use_orig_params)
                    return wrap_func
                FSDP.__init__ = patch_FSDP_use_orig_params(FSDP.__init__)
    from gpt4roi.datasets.data_modules import make_multitask_data_module
    data_module = make_multitask_data_module(tokenizer=tokenizer, data_args=data_args)
    if model_args.load_from:
        print(f'load ckpt from {model_args.load_from}')
        model.from_pretrained(model_args.load_from)
    # Env-var switches for restricted training subsets.
    if os.environ.get('ONLY_SPI', None):
        # Train only the SPI module.
        for (n, p) in model.named_parameters():
            if ('spi_module' not in n):
                p.requires_grad = False
            else:
                p.requires_grad = True
                print(n)
    if os.environ.get('PROJ', None):
        # Additionally unfreeze the multimodal projector.
        for (n, p) in model.named_parameters():
            if ('mm_projector' in n):
                p.requires_grad = True
                print(n)
    trainer = LLaVATrainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
    print('all trainable parameters')
    for (n, p) in model.named_parameters():
        if p.requires_grad:
            print(n)
    # Resume automatically when a checkpoint directory already exists.
    if list(pathlib.Path(training_args.output_dir).glob('checkpoint-*')):
        print('resume', ('---' * 200))
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()
    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
class SeparatorStyle(Enum):
    """The separator conventions available when rendering a conversation prompt."""

    SINGLE = 1  # one shared separator after every message
    TWO = 2     # alternating separators (sep / sep2)
    MPT = 3     # MPT chat markup: role string immediately followed by message
@dataclasses.dataclass
class Conversation():
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    # [role, message] pairs; a message may also be a tuple
    # (text, PIL image, image_process_mode) for multimodal turns.
    messages: List[List[str]]
    # Messages before this index are skipped by the UI helpers below.
    offset: int
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = '###'
    sep2: str = None
    version: str = 'Unknown'
    skip_next: bool = False

    def get_prompt(self):
        """Render the full prompt string according to sep_style."""
        if (self.sep_style == SeparatorStyle.SINGLE):
            ret = (self.system + self.sep)
            for (role, message) in self.messages:
                if message:
                    if (type(message) is tuple):
                        # Multimodal message: keep only the text part.
                        (message, _, _) = message
                    ret += (((role + ': ') + message) + self.sep)
                else:
                    # Empty message: emit the role header so the model
                    # continues from there.
                    ret += (role + ':')
            return ret
        elif (self.sep_style == SeparatorStyle.TWO):
            seps = [self.sep, self.sep2]
            ret = (self.system + seps[0])
            for (i, (role, message)) in enumerate(self.messages):
                if message:
                    if (type(message) is tuple):
                        (message, _, _) = message
                    ret += (((role + ': ') + message) + seps[(i % 2)])
                else:
                    ret += (role + ':')
            return ret
        if (self.sep_style == SeparatorStyle.MPT):
            ret = (self.system + self.sep)
            for (role, message) in self.messages:
                if message:
                    if (type(message) is tuple):
                        (message, _, _) = message
                    # MPT style: role token directly followed by the message.
                    ret += ((role + message) + self.sep)
                else:
                    ret += role
            return ret
        else:
            raise ValueError(f'Invalid style: {self.sep_style}')

    def append_message(self, role, message):
        """Append a [role, message] pair to the history."""
        self.messages.append([role, message])

    def get_images(self, return_pil=False):
        """Collect images from even-indexed (user) turns past the offset.

        Returns PIL images when return_pil is True, otherwise base64-encoded
        JPEG strings.
        """
        images = []
        for (i, (role, msg)) in enumerate(self.messages[self.offset:]):
            if ((i % 2) == 0):
                if (type(msg) is tuple):
                    import base64
                    from io import BytesIO
                    from PIL import Image
                    (msg, image, image_process_mode) = msg
                    if (image_process_mode == 'Pad'):

                        def expand2square(pil_img, background_color=(122, 116, 104)):
                            # Pad the shorter side so the image becomes square.
                            (width, height) = pil_img.size
                            if (width == height):
                                return pil_img
                            elif (width > height):
                                result = Image.new(pil_img.mode, (width, width), background_color)
                                result.paste(pil_img, (0, ((width - height) // 2)))
                                return result
                            else:
                                result = Image.new(pil_img.mode, (height, height), background_color)
                                result.paste(pil_img, (((height - width) // 2), 0))
                                return result
                        image = expand2square(image)
                    elif (image_process_mode == 'Crop'):
                        pass
                    elif (image_process_mode == 'Resize'):
                        image = image.resize((224, 224))
                    else:
                        raise ValueError(f'Invalid image_process_mode: {image_process_mode}')
                    # Downscale so the shortest edge is at most 400 px and the
                    # longest at most ~800 px, preserving aspect ratio.
                    (max_hw, min_hw) = (max(image.size), min(image.size))
                    aspect_ratio = (max_hw / min_hw)
                    (max_len, min_len) = (800, 400)
                    shortest_edge = int(min((max_len / aspect_ratio), min_len, min_hw))
                    longest_edge = int((shortest_edge * aspect_ratio))
                    (W, H) = image.size
                    if (H > W):
                        (H, W) = (longest_edge, shortest_edge)
                    else:
                        (H, W) = (shortest_edge, longest_edge)
                    image = image.resize((W, H))
                    if return_pil:
                        images.append(image)
                    else:
                        buffered = BytesIO()
                        image.save(buffered, format='JPEG')
                        img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                        images.append(img_b64_str)
        return images

    def to_gradio_chatbot(self):
        """Convert history into Gradio chatbot [user, assistant] pairs,
        inlining images as base64 <img> tags in the user message."""
        ret = []
        for (i, (role, msg)) in enumerate(self.messages[self.offset:]):
            if ((i % 2) == 0):
                if (type(msg) is tuple):
                    import base64
                    from io import BytesIO
                    (msg, image, image_process_mode) = msg
                    # Same downscaling policy as get_images().
                    (max_hw, min_hw) = (max(image.size), min(image.size))
                    aspect_ratio = (max_hw / min_hw)
                    (max_len, min_len) = (800, 400)
                    shortest_edge = int(min((max_len / aspect_ratio), min_len, min_hw))
                    longest_edge = int((shortest_edge * aspect_ratio))
                    (W, H) = image.size
                    if (H > W):
                        (H, W) = (longest_edge, shortest_edge)
                    else:
                        (H, W) = (shortest_edge, longest_edge)
                    image = image.resize((W, H))
                    buffered = BytesIO()
                    image.save(buffered, format='JPEG')
                    img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                    img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
                    msg = msg.replace('<image>', img_str)
                ret.append([msg, None])
            else:
                # Assistant turn fills the second slot of the last pair.
                ret[(- 1)][(- 1)] = msg
        return ret

    def copy(self):
        """Return a deep-enough copy (message list is duplicated).

        NOTE(review): 'version' and 'skip_next' are not carried over, so the
        copy resets them to their defaults — confirm this is intended.
        """
        return Conversation(system=self.system, roles=self.roles, messages=[[x, y] for (x, y) in self.messages], offset=self.offset, sep_style=self.sep_style, sep=self.sep, sep2=self.sep2)

    def dict(self):
        """Serialize to a plain dict; tuple messages are reduced to their text."""
        if (len(self.get_images()) > 0):
            return {'system': self.system, 'roles': self.roles, 'messages': [[x, (y[0] if (type(y) is tuple) else y)] for (x, y) in self.messages], 'offset': self.offset, 'sep': self.sep, 'sep2': self.sep2}
        return {'system': self.system, 'roles': self.roles, 'messages': self.messages, 'offset': self.offset, 'sep': self.sep, 'sep2': self.sep2}
def main(args):
    """Convert an Alpaca-format JSON file into the conversation format."""
    with pathlib.Path(args.data_path).open() as f:
        records = json.load(f)
    prompt_input = PROMPT_DICT['prompt_input']
    prompt_no_input = PROMPT_DICT['prompt_no_input']
    new_data = []
    # ids are 1-based strings, matching the original converter.
    for idx, example in enumerate(records, start=1):
        has_input = example.get('input', '') != ''
        template = prompt_input if has_input else prompt_no_input
        new_data.append({
            'id': str(idx),
            'conversations': [
                {'from': 'human', 'value': template.format_map(example)},
                {'from': 'gpt', 'value': example['output']},
            ],
        })
    json.dump(new_data, open(args.output_path, 'w'), indent=2)
def reformat_code(val: str) -> str:
    """Rewrite code-language fences in *val* using the module-level regex."""
    reformatted = re.sub(code_lang_pattern, code_lang_format, val)
    return reformatted
def html_to_markdown(val: str) -> str:
    """Convert an exported HTML snippet to cleaned-up markdown."""
    # Strip wrapper tags first, then let markdownify do the conversion.
    stripped = re.sub(span_pattern, '', re.sub(div_pattern, '', val))
    md = markdownify.markdownify(stripped).strip()
    md = reformat_code(md)
    # Drop a leading "regenerate" UI artifact, but only at position 0.
    noise = re.search(regenerate_pattern, md)
    if noise and noise.start() == 0:
        md = md[noise.end():]
    # Remove copy-button artifacts, collapse triple newlines, and trim.
    md = re.sub(copy_chars_pattern, '', md)
    md = re.sub(copy_code_pattern, '', md)
    return md.replace('\n\n\n', '\n').strip()
def contain_blocked_words(val: str) -> bool:
    """Return True when *val* mentions any blocked word (case-insensitive)."""
    lowered = val.lower()
    return any(word in lowered for word in ('openai', 'chatgpt'))
def clean_html_one_sample(sample):
    """Clean one conversation sample in place.

    Returns (sample, code) where code is 0 on success, 1 too short,
    2 wrong speaker order, 3 blocked words, 4 parser error.
    """
    roles = ['human', 'gpt']
    if len(sample['conversations']) <= 1:
        return (sample, 1)
    # Drop a leading non-human turn so the dialog starts with the user.
    if sample['conversations'][0]['from'] != 'human':
        sample['conversations'] = sample['conversations'][1:]
        if len(sample['conversations']) <= 1:
            return (sample, 1)
    # Drop a trailing human turn that never got a reply.
    if sample['conversations'][-1]['from'] == 'human':
        sample['conversations'] = sample['conversations'][:-1]
        if len(sample['conversations']) <= 1:
            return (sample, 1)
    for idx, turn in enumerate(sample['conversations']):
        if turn['from'] != roles[idx % 2]:
            return (sample, 2)
        if contain_blocked_words(turn['value']):
            return (sample, 3)
        try:
            converted = html_to_markdown(turn['value'])
        except (bs4.builder.ParserRejectedMarkup, AssertionError):
            return (sample, 4)
        turn['value'] = converted
    return (sample, 0)
def clean_html_all(content, begin, end):
    """Clean the source html files.

    Runs clean_html_one_sample over content[begin:end] in a process pool,
    then filters out malformed samples and id/value duplicates, printing a
    reason for each skip and summary counters at the end.
    """
    cnt_skip = 0
    cnt_blocked_words = 0
    cnt_wrong_format = 0
    cnt_parser_error = 0
    cnt_too_short = 0
    cnt_id_duplication = 0
    cnt_value_duplication = 0
    # Unused counter kept for parity with the summary scheme.
    cnt_tag = 0
    content = content[begin:end]
    processed = []
    with ProcessPoolExecutor() as executor:
        for result in tqdm(executor.map(clean_html_one_sample, content), total=len(content)):
            processed.append(result)
    # visited maps BOTH sample ids and (first-reply, length) keys to the id
    # of the first sample seen with that id/content.
    visited = {}
    new_content = []
    for (sample, error_code) in tqdm(processed):
        cid = sample['id']
        skipped = True
        if (error_code != 0):
            if (error_code == 1):
                print(f'id {cid} is too short')
                cnt_too_short += 1
            elif (error_code == 2):
                print(f'id {cid} has a wrong format')
                cnt_wrong_format += 1
            elif (error_code == 3):
                print(f'id {cid} contains blocked words')
                cnt_blocked_words += 1
            elif (error_code == 4):
                print(f'id {cid} contains parser errors')
                cnt_parser_error += 1
            else:
                raise ValueError(f'Invalid error_code: {error_code}')
        elif (cid in visited):
            print(f'id {cid} is an id duplication of {visited[cid]}')
            cnt_id_duplication += 1
        elif ((sample['conversations'][1]['value'], len(sample['conversations'])) in visited):
            # Same first assistant reply and same turn count ⇒ treat as a
            # content duplicate even under a different id.
            key = (sample['conversations'][1]['value'], len(sample['conversations']))
            print(f'id {cid} is a value duplication of {visited[key]}')
            cnt_value_duplication += 1
        else:
            key = (sample['conversations'][1]['value'], len(sample['conversations']))
            visited[cid] = visited[key] = cid
            skipped = False
        if (not skipped):
            new_content.append(sample)
        else:
            cnt_skip += 1
    print(f'total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, cnt_wrong_format: {cnt_wrong_format}, cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, cnt_value_duplication: {cnt_value_duplication}, ')
    return new_content
def main(args):
    """Load, clean, and re-dump the conversation JSON described by *args*."""
    raw = json.load(open(args['in_file'], 'r'))
    cleaned = clean_html_all(raw, args['begin'], args['end'])
    json.dump(cleaned, open(args['out_file'], 'w'), indent=2)
def skip(conv, args):
    """Return True when *conv* should be filtered out.

    Filters by detected language (keep only args.lang, drop args.skip_lang)
    and, when args.reduce_rep is set, by long runs of a repeated digit.
    """
    # Language detection is only needed when a language filter is active.
    if args.lang != 'all' or args.skip_lang is not None:
        text = '\n'.join(turn['value'] for turn in conv['conversations'])
        try:
            lang_code = Detector(text).language.code
        except (pycld2.error, polyglot.detect.base.UnknownLanguage):
            lang_code = 'unknown'
        if args.lang != 'all' and lang_code != args.lang:
            return True
        if lang_code == args.skip_lang:
            return True
    if args.reduce_rep:
        # Drop conversations containing the same digit repeated 9+ times.
        for turn in conv['conversations']:
            if re.search('(\\d)\\1{8}', turn['value']) is not None:
                return True
    return False
def split_sample(sample, start_idx, end_idx):
    """Slice a conversation chunk out of *sample*.

    The slice is [start_idx, end_idx); when the turn at end_idx is not from
    'human' it is included too, so chunks never end right before a reply.
    """
    if sample['conversations'][end_idx]['from'] != 'human':
        end_idx += 1
    return {
        'id': sample['id'] + '_' + str(start_idx),
        'conversations': sample['conversations'][start_idx:end_idx],
    }
def split_contents(content, begin, end, tokenizer, max_length):
    """Keep the maximum round of conversations within the max token length constraint.

    For each sample in content[begin:end], measures the token length of every
    rendered turn, then greedily packs consecutive turns into chunks whose
    total stays within max_length, emitting each chunk via split_sample.
    """
    content = content[begin:end]
    new_content = []
    for sample in tqdm.tqdm(content):
        tokenized_lens = []
        for c in sample['conversations']:
            # Map raw speaker tags onto the template's role names.
            from_str = c['from']
            if (from_str.lower() == 'human'):
                from_str = conversation_lib.default_conversation.roles[0]
            elif (from_str.lower() == 'gpt'):
                from_str = conversation_lib.default_conversation.roles[1]
            else:
                from_str = 'unknown'
            # Measure the turn exactly as it will appear in training data.
            sentence = ((((BEGIN_SIGNAL + from_str) + ': ') + c['value']) + END_SIGNAL)
            length = tokenizer(sentence, return_tensors='pt', padding='longest').input_ids.ne(tokenizer.pad_token_id).sum().item()
            tokenized_lens.append(length)
        num_tokens = 0
        start_idx = 0
        for (idx, l) in enumerate(tokenized_lens):
            if ((num_tokens + l) > max_length):
                # Current turn would overflow: flush [start_idx, idx) and
                # start a new chunk at this turn.
                new_content.append(split_sample(sample, start_idx, idx))
                start_idx = idx
                num_tokens = l
            else:
                num_tokens += l
                if (idx == (len(tokenized_lens) - 1)):
                    # Flush the final (non-overflowing) chunk.
                    new_content.append(split_sample(sample, start_idx, idx))
    print(f'total: {len(content)}, new: {len(new_content)}')
    return new_content
def main(args):
    """Split over-long conversations in a JSON file by token budget."""
    content = json.load(open(args.in_file, 'r'))
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        args.model_name_or_path,
        model_max_length=args.max_length,
        padding_side='right',
        use_fast=False,
    )
    # The splitter counts non-pad tokens, so a pad token must exist.
    if tokenizer.pad_token is None:
        tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN))
    splitted = split_contents(content, args.begin, args.end, tokenizer, args.max_length)
    json.dump(splitted, open(args.out_file, 'w'), indent=2)
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    """Query GPT-4 with *content*, retrying until a response is obtained.

    Returns the assistant message text. Retries forever on failure.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(model='gpt-4', messages=[{'role': 'system', 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'}, {'role': 'user', 'content': content}], temperature=0.2, max_tokens=max_tokens)
            break
        except openai.error.RateLimitError:
            # BUG FIX: the original `pass` retried immediately, hammering the
            # API in a tight loop while rate-limited. Back off briefly.
            time.sleep(1)
        except Exception as e:
            print(e)
            time.sleep(1)
    print('success!')
    return response['choices'][0]['message']['content']
def parse_score(review):
    """Parse the two scores on the first line of a GPT review.

    Accepts 'x y' or 'x, y'; returns [x, y] as floats, or [-1, -1] when the
    line cannot be parsed (logging the offending review).
    """
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        # BUG FIX: split() instead of split(' ') — 'x, y' becomes 'x  y'
        # after the replace, and split(' ') then yields an empty token,
        # wrongly rejecting a valid score pair.
        sp = score_pair.split()
        if (len(sp) == 2):
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [(- 1), (- 1)]
    except Exception as e:
        print(e)
        print('error', review)
        return [(- 1), (- 1)]
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
    """Query GPT-4 with *content*, retrying until a response is obtained.

    Returns the assistant message text. Retries forever on failure.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(model='gpt-4', messages=[{'role': 'system', 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'}, {'role': 'user', 'content': content}], temperature=0.2, max_tokens=max_tokens)
            break
        except openai.error.RateLimitError:
            # BUG FIX: the original `pass` retried immediately, hammering the
            # API in a tight loop while rate-limited. Back off briefly.
            time.sleep(1)
        except Exception as e:
            print(e)
            time.sleep(1)
    print('success!')
    return response['choices'][0]['message']['content']
def parse_score(review):
    """Parse the two scores on the first line of a GPT review.

    Accepts 'x y' or 'x, y'; returns [x, y] as floats, or [-1, -1] when the
    line cannot be parsed (logging the offending review).
    """
    try:
        score_pair = review.split('\n')[0]
        score_pair = score_pair.replace(',', ' ')
        # BUG FIX: split() instead of split(' ') — 'x, y' becomes 'x  y'
        # after the replace, and split(' ') then yields an empty token,
        # wrongly rejecting a valid score pair.
        sp = score_pair.split()
        if (len(sp) == 2):
            return [float(sp[0]), float(sp[1])]
        else:
            print('error', review)
            return [(- 1), (- 1)]
    except Exception as e:
        print(e)
        print('error', review)
        return [(- 1), (- 1)]
def get_args(argv=None):
    """Parse CLI arguments.

    Args:
        argv: optional argument list; defaults to sys.argv[1:] (backward
            compatible with the original zero-argument call).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--result-file', type=str)
    parser.add_argument('--output-file', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # BUG FIX: nargs='+' instead of type=list — type=list would split a
    # single CLI string argument into individual characters.
    parser.add_argument('--options', nargs='+', default=['A', 'B', 'C', 'D', 'E'])
    return parser.parse_args(argv)
def convert_caps(results):
    """Map QA-style result records to COCO caption records."""
    return [
        {'image_id': int(item['question_id']), 'caption': item['text']}
        for item in results
    ]
def get_pred_idx(prediction, choices, options):
    """Get the index (e.g. 2) from the prediction (e.g. 'C').

    Falls back to a uniformly random valid index when the prediction is not
    one of the option letters applicable to this question.
    """
    valid_letters = options[:len(choices)]
    if prediction not in valid_letters:
        return random.choice(range(len(choices)))
    return options.index(prediction)
def get_args(argv=None):
    """Parse CLI arguments.

    Args:
        argv: optional argument list; defaults to sys.argv[1:] (backward
            compatible with the original zero-argument call).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # BUG FIX: nargs='+' instead of type=list — type=list would split a
    # single CLI string argument into individual characters.
    parser.add_argument('--options', nargs='+', default=['A', 'B', 'C', 'D', 'E'])
    return parser.parse_args(argv)
def convert_caps(results):
    """Map QA-style result records to COCO caption records."""
    fakecaps = []
    for item in results:
        fakecaps.append({
            'image_id': int(item['question_id']),
            'caption': item['text'],
        })
    return fakecaps
def get_pred_idx(prediction, choices, options):
    """Get the index (e.g. 2) from the prediction (e.g. 'C').

    Out-of-range predictions fall back to a random valid index.
    """
    if prediction in options[:len(choices)]:
        return options.index(prediction)
    return random.choice(range(len(choices)))
def get_args(argv=None):
    """Parse CLI arguments.

    Args:
        argv: optional argument list; defaults to sys.argv[1:] (backward
            compatible with the original zero-argument call).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-dir', type=str)
    parser.add_argument('--gpt4-result', type=str)
    parser.add_argument('--requery-result', type=str)
    parser.add_argument('--our-result', type=str)
    parser.add_argument('--output-result', type=str)
    parser.add_argument('--split', type=str, default='test')
    # BUG FIX: nargs='+' instead of type=list — type=list would split a
    # single CLI string argument into individual characters.
    parser.add_argument('--options', nargs='+', default=['A', 'B', 'C', 'D', 'E'])
    return parser.parse_args(argv)
def convert_caps(results):
    """Map QA-style result records to COCO caption records."""
    make_record = lambda item: {'image_id': int(item['question_id']), 'caption': item['text']}
    return [make_record(item) for item in results]
def get_pred_idx(prediction, choices, options):
    """Get the index (e.g. 2) from the prediction (e.g. 'C').

    Unrecognized predictions are resolved to a random valid choice index.
    """
    usable = options[:len(choices)]
    return options.index(prediction) if prediction in usable else random.choice(range(len(choices)))
def read_jsonl(path: str, key: str=None):
    """Read a JSON-Lines file.

    Returns the records in file order; when *key* is given, returns a dict
    mapping record[key] -> record, in ascending key order.
    """
    records = []
    with open(os.path.expanduser(path)) as fp:
        for raw in fp:
            if not raw:
                continue
            records.append(json.loads(raw))
    if key is None:
        return records
    records.sort(key=lambda item: item[key])
    return {item[key]: item for item in records}
def trim_hanging_lines(s: str, n: int) -> str:
    """Return *s* stripped, with its first *n* lines removed.

    Whitespace is stripped again after each removal, so blank lines between
    the dropped line and the remainder are consumed too. Raises IndexError
    when fewer than *n* newlines remain.
    """
    text = s.strip()
    for _ in range(n):
        text = text.split('\n', 1)[1].strip()
    return text
def get_answer(question_id: int, question: str, max_tokens: int):
    """Ask the OpenAI chat model one question, retrying up to three times.

    Returns an answer record; 'text' is '#ERROR#' if every attempt failed.
    """
    ans = {'answer_id': shortuuid.uuid(), 'question_id': question_id, 'model_id': MODEL_ID}
    for _ in range(3):
        try:
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=[
                    {'role': 'system', 'content': 'You are a helpful assistant.'},
                    {'role': 'user', 'content': question},
                ],
                max_tokens=max_tokens,
            )
        except Exception as e:
            print('[ERROR]', e)
            ans['text'] = '#ERROR#'
            time.sleep(1)
        else:
            ans['text'] = response['choices'][0]['message']['content']
            return ans
    return ans
def consolidate_ckpt(src_path, dst_path):
    """Load a checkpoint from src_path and re-save model + tokenizer to dst_path."""
    print('Loading model')
    # Upgrade legacy config formats in place before loading.
    auto_upgrade(src_path)
    model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    tokenizer = AutoTokenizer.from_pretrained(src_path)
    model.save_pretrained(dst_path)
    tokenizer.save_pretrained(dst_path)
def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
    """Adds sentinel tokens and padding token (if missing).

    Expands the tokenizer vocabulary with the <extra_id_i> sentinel tokens
    used by mixture-of-denoiser tasks, adds a '<pad>' token when none exists,
    and caches the sentinel token ids on the tokenizer. All additions are
    special tokens; existing tokens are never duplicated.
    """
    sentinel_tokens = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinel_tokens, special_tokens=True)
    if tokenizer.pad_token is None:
        tokenizer.add_tokens('<pad>', special_tokens=True)
        tokenizer.pad_token = '<pad>'
        assert tokenizer.pad_token_id is not None
    # Cache the ids of the concatenated sentinel string for later use.
    joined_sentinels = ''.join(sentinel_tokens)
    tokenizer.sentinel_token_ids = tokenizer(joined_sentinels, add_special_tokens=False).input_ids
class AutoTokenizerForMOD(AutoTokenizer):
    """AutoTokenizer + Adaptation for MOD.

    A thin wrapper that instantiates a tokenizer via AutoTokenizer and then
    applies the mixture-of-denoiser adaptation (sentinel tokens, padding
    token, cached sentinel ids) before returning it.
    """

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """See `AutoTokenizer.from_pretrained` docstring."""
        adapted = super().from_pretrained(*args, **kwargs)
        adapt_tokenizer_for_denoising(adapted)
        return adapted
class MPTMLP(nn.Module):
    """Transformer feed-forward block: up-project, GELU, down-project."""

    def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
        super().__init__()
        hidden_dim = expansion_ratio * d_model
        self.up_proj = nn.Linear(d_model, hidden_dim, device=device)
        self.act = nn.GELU(approximate='none')
        self.down_proj = nn.Linear(hidden_dim, d_model, device=device)
        # Flag consumed by the initialization code for residual projections.
        self.down_proj._is_residual = True

    def forward(self, x):
        hidden = self.act(self.up_proj(x))
        return self.down_proj(hidden)
class MPTBlock(nn.Module):
    """One MPT transformer layer: pre-norm self-attention and pre-norm MLP,
    each followed by dropout and a residual connection."""

    def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Optional[Dict]=None, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', device: Optional[str]=None, **kwargs):
        """Build the block.

        Args:
            d_model: Embedding dimension.
            n_heads: Number of attention heads.
            expansion_ratio: MLP up-projection factor.
            attn_config: Attention configuration dict; when None, the standard
                multihead/triton defaults below are used.
            resid_pdrop: Dropout probability on both residual branches.
            norm_type: Key into NORM_CLASS_REGISTRY selecting the norm class.
            device: Device for parameter construction.
        """
        # BUGFIX: attn_config used to be a mutable default argument (one dict
        # object shared by every call that omitted it). Use a None sentinel and
        # build a fresh default per instantiation instead.
        if attn_config is None:
            attn_config = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}
        del kwargs  # absorb and discard extra config keys
        super().__init__()
        norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
        attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
        self.norm_1 = norm_class(d_model, device=device)
        self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, device=device)
        self.norm_2 = norm_class(d_model, device=device)
        self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
        self.resid_attn_dropout = nn.Dropout(resid_pdrop)
        self.resid_ffn_dropout = nn.Dropout(resid_pdrop)

    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[(torch.Tensor, Optional[Tuple[torch.Tensor]])]:
        """Run attention + MLP with pre-norm and residual adds; returns (hidden, kv cache)."""
        a = self.norm_1(x)
        (b, _, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
        x = x + self.resid_attn_dropout(b)
        m = self.norm_2(x)
        n = self.ffn(m)
        x = x + self.resid_ffn_dropout(n)
        return (x, past_key_value)
class MPTConfig(PretrainedConfig):
    # HuggingFace model-type tag used for config/model auto-mapping.
    model_type = 'mpt'

    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[(float, str)]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):
        """The MPT configuration class.

        Args:
            d_model (int): Size of the embedding dimension of the model.
            n_heads (int): Number of attention heads.
            n_layers (int): Number of transformer layers.
            expansion_ratio (int): Up/down scale ratio of the MLP.
            max_seq_len (int): Maximum sequence length of the model.
            vocab_size (int): Vocabulary size.
            resid_pdrop (float): Dropout applied to the attention output before the residual add.
            emb_pdrop (float): Dropout probability for the embedding layer.
            learned_pos_emb (bool): Whether to use learned positional embeddings.
            attn_config (Dict): Attention-module configuration. Keys include:
                attn_type ('multihead_attention' or 'multiquery_attention'), attn_pdrop,
                attn_impl ('torch' | 'flash' | 'triton'), qk_ln, clip_qkv, softmax_scale
                (None means the default 1/sqrt(d_keys)), prefix_lm, attn_uses_sequence_id,
                alibi, alibi_bias_max. Missing keys are filled from attn_config_defaults.
            init_device (str): Device to use for parameter initialization.
            logit_scale (Optional[Union[float, str]]): Numeric scale for logits,
                or the string 'inv_sqrt_d_model'.
            no_bias (bool): Whether to drop bias terms in all layers.
            verbose (int): Verbosity level; 0 is silent.
            embedding_fraction (float): Fraction by which embedding-layer gradients are scaled.
            norm_type (str): Which norm implementation to use.
            use_cache (bool): Whether the model returns the last key/values attentions.
            init_config (Dict): Parameter-initialization config; 'name' selects the scheme
                ('default_', 'baseline_', 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_',
                'small_init_', 'xavier_uniform_', 'xavier_normal_'). Other keys include
                init_div_is_residual, emb_init_std, emb_init_uniform_lim, init_std, init_gain,
                fan_mode, init_nonlinearity. Missing keys are filled from init_config_defaults.
                See llmfoundry.models.utils.param_init_fns.py for the full option list.
        """
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.expansion_ratio = expansion_ratio
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.learned_pos_emb = learned_pos_emb
        self.attn_config = attn_config
        self.init_device = init_device
        self.logit_scale = logit_scale
        self.no_bias = no_bias
        self.verbose = verbose
        self.embedding_fraction = embedding_fraction
        self.norm_type = norm_type
        self.use_cache = use_cache
        self.init_config = init_config
        # Drop keys that PretrainedConfig should not see (injected by some loaders).
        if ('name' in kwargs):
            del kwargs['name']
        if ('loss_fn' in kwargs):
            del kwargs['loss_fn']
        super().__init__(**kwargs)
        self._validate_config()

    def _set_config_defaults(self, config, config_defaults):
        # Shallow fill-in: existing keys win, missing keys come from the defaults.
        for (k, v) in config_defaults.items():
            if (k not in config):
                config[k] = v
        return config

    def _validate_config(self):
        # Complete sub-configs with defaults before checking invariants.
        self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)
        self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)
        if ((self.d_model % self.n_heads) != 0):
            raise ValueError('d_model must be divisible by n_heads')
        if any((((prob < 0) or (prob > 1)) for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):
            raise ValueError("self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1")
        if (self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']):
            raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
        # Several attention features are only wired up for the torch/triton kernels.
        if (self.attn_config['prefix_lm'] and (self.attn_config['attn_impl'] not in ['torch', 'triton'])):
            raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
        if (self.attn_config['alibi'] and (self.attn_config['attn_impl'] not in ['torch', 'triton'])):
            raise NotImplementedError('alibi only implemented with torch and triton attention.')
        if (self.attn_config['attn_uses_sequence_id'] and (self.attn_config['attn_impl'] not in ['torch', 'triton'])):
            raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')
        if ((self.embedding_fraction > 1) or (self.embedding_fraction <= 0)):
            raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
        if (isinstance(self.logit_scale, str) and (self.logit_scale != 'inv_sqrt_d_model')):
            raise ValueError(f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
        if (self.init_config.get('name', None) is None):
            raise ValueError(f"self.init_config={self.init_config!r} 'name' needs to be set.")
        # The model needs at least one source of positional information.
        if ((not self.learned_pos_emb) and (not self.attn_config['alibi'])):
            raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')
@contextmanager
def init_empty_weights(include_buffers: bool=False):
    """Meta-device initialization context manager.

    While active, newly constructed models place all their parameters on the
    ``meta`` device, producing an "empty" model with no real weight storage.
    Useful when merely instantiating the model would exhaust available RAM.

    Args:
        include_buffers (bool, optional): When True, buffers are also created
            on the meta device. Defaults to False.

    Note: a model built under this manager has no actual weights, so it cannot
    simply be moved with ``model.to(device)``; the weights must be loaded into
    it separately.
    """
    meta_device = torch.device('meta')
    with init_on_device(meta_device, include_buffers=include_buffers) as ctx:
        yield ctx
@contextmanager
def init_on_device(device: torch.device, include_buffers: bool=False):
    'Device initialization context manager.\n\n    A context manager under which models are initialized with all parameters\n    on the specified device.\n\n    Args:\n        device (`torch.device`): Device to initialize all parameters on.\n        include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n            not to also put all buffers on the meta device while initializing.\n\n    Example:\n    ```python\n    import torch.nn as nn\n\n    with init_on_device(device=torch.device("cuda")):\n        tst = nn.Linear(100, 100)  # on `cuda` device\n    ```\n    '
    # Save the originals so they can be restored in the finally block below.
    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        # Register normally, then rebuild the parameter on the target device.
        old_register_parameter(module, name, param)
        if (param is not None):
            param_cls = type(module._parameters[name])
            # Carries over any custom attributes stored on the parameter object.
            # NOTE(review): requires_grad is not explicitly forwarded here;
            # presumably the param class default suffices -- TODO confirm.
            kwargs = module._parameters[name].__dict__
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)

    def register_empty_buffer(module, name, buffer):
        old_register_buffer(module, name, buffer)
        if (buffer is not None):
            module._buffers[name] = module._buffers[name].to(device)
    # Only patch bare tensor constructors when buffers are included, so that
    # buffer-creating calls like torch.zeros also land on the target device.
    if include_buffers:
        tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):
        # Force the device kwarg on the wrapped constructor.
        def wrapper(*args, **kwargs):
            kwargs['device'] = device
            return fn(*args, **kwargs)
        return wrapper
    try:
        # Install the monkeypatches for the duration of the with-block.
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
        (yield)
    finally:
        # Always restore the original functions, even if the body raised.
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)
def _cast_if_autocast_enabled(tensor): if torch.is_autocast_enabled(): if (tensor.device.type == 'cuda'): dtype = torch.get_autocast_gpu_dtype() elif (tensor.device.type == 'cpu'): dtype = torch.get_autocast_cpu_dtype() else: raise NotImplementedError() return tensor.to(dtype=dtype) return tensor
class LPLayerNorm(torch.nn.LayerNorm):
    """LayerNorm variant that runs in low precision under autocast.

    Inputs, weight, and bias are downcast to the autocast dtype (when autocast
    is active), then layer_norm is executed with autocast disabled so the op
    itself is not re-cast.
    """

    def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
        super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)

    def forward(self, x):
        device_type = x.device.type
        x_dc = _cast_if_autocast_enabled(x)
        w_dc = self.weight if self.weight is None else _cast_if_autocast_enabled(self.weight)
        b_dc = self.bias if self.bias is None else _cast_if_autocast_enabled(self.bias)
        with torch.autocast(enabled=False, device_type=device_type):
            return torch.nn.functional.layer_norm(x_dc, self.normalized_shape, w_dc, b_dc, self.eps)
def rms_norm(x, weight=None, eps=1e-05):
    """Root-mean-square normalization: x / sqrt(mean(x**2) + eps), optionally scaled.

    Args:
        x: Input tensor; normalized over its last dimension.
        weight: Optional elementwise scale applied after normalization.
        eps: Small constant added to the mean square for numerical stability.

    BUGFIX: this previously computed ``x / torch.rsqrt(ms + eps)``, which is
    ``x * sqrt(ms + eps)`` -- amplifying instead of normalizing. The correct
    form multiplies by the reciprocal square root.
    """
    output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    if weight is not None:
        return output * weight
    return output
class RMSNorm(torch.nn.Module):
    """RMS normalization layer with an optional learnable elementwise scale."""

    def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
        super().__init__()
        self.eps = eps
        if not weight:
            # No learnable scale; register None so state_dict stays consistent.
            self.register_parameter('weight', None)
        else:
            self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))

    def forward(self, x):
        # Normalize in fp32 for stability, then restore the input dtype.
        return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
class LPRMSNorm(RMSNorm):
    """RMSNorm that downcasts activations and weight under autocast.

    Mirrors LPLayerNorm: inputs/weight are cast to the autocast dtype, the
    normalization runs with autocast disabled, and the result is cast back to
    the input dtype.
    """

    def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
        super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)

    def forward(self, x):
        x_dc = _cast_if_autocast_enabled(x)
        w_dc = self.weight if self.weight is None else _cast_if_autocast_enabled(self.weight)
        with torch.autocast(enabled=False, device_type=x.device.type):
            return rms_norm(x_dc, w_dc, self.eps).to(dtype=x.dtype)
def torch_default_param_init_fn_(module: nn.Module, verbose: int=0, **kwargs):
    """Reinitialize *module* with its own ``reset_parameters``, when it has one.

    Extra keyword arguments are accepted (for interface parity with the other
    param-init functions) and discarded.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(f"Initializing network using module's reset_parameters attribute")
    if hasattr(module, 'reset_parameters'):
        module.reset_parameters()
def fused_init_helper_(module: nn.Module, init_fn_):
    """Apply *init_fn_* independently to each segment of a fused weight.

    The module must carry a ``_fused`` attribute of the form
    ``(dim, split_points)``: the weight is partitioned along ``dim`` at the
    given split points (e.g. a packed QKV projection), and each slice is
    initialized separately so per-segment fan statistics are preserved.

    Raises:
        RuntimeError: If the module has no ``_fused`` metadata.
    """
    _fused = getattr(module, '_fused', None)
    if _fused is None:
        raise RuntimeError(f'Internal logic error')
    (dim, splits) = _fused
    boundaries = (0, *splits, module.weight.size(dim))
    for (start, end) in zip(boundaries[:-1], boundaries[1:]):
        slice_indices = [slice(None)] * module.weight.ndim
        slice_indices[dim] = slice(start, end)
        # BUGFIX: index with a tuple -- indexing a tensor with a Python list of
        # slices is deprecated advanced indexing and errors on newer PyTorch.
        init_fn_(module.weight[tuple(slice_indices)])
def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Initialize one module's own parameters with ``init_fn_``.

    Handles nn.Linear (including fused weights and `_is_residual` rescaling),
    nn.Embedding (with optional normal/uniform overrides), registered norm
    classes, and nn.MultiheadAttention. Biases are zeroed.

    Args:
        module: Module whose directly-owned parameters are initialized.
        init_fn_: In-place initializer applied to weight tensors.
        n_layers: Transformer depth; used for the default residual divisor.
        d_model: Model width; required for fused nn.MultiheadAttention weights.
        init_div_is_residual: True -> divide `_is_residual` weights by
            sqrt(2 * n_layers); a number or numeric string gives the divisor
            explicitly; False disables the rescaling.
        emb_init_std: If set, embeddings use N(0, emb_init_std).
        emb_init_uniform_lim: If set, embeddings use a uniform distribution
            (scalar means symmetric limits). emb_init_std takes precedence.
        verbose: Emit informational warnings when > 1.

    Raises:
        ValueError: Bad ``init_div_is_residual`` or uniform limits.
        NotImplementedError: Module owns parameters but matches no branch.
    """
    del kwargs
    if (verbose > 1):
        warnings.warn(f'If model has bias parameters they are initialized to 0.')
    init_div_is_residual = init_div_is_residual  # no-op self-assignment, kept as-is
    # Resolve the divisor applied to weights tagged `_is_residual`.
    if (init_div_is_residual is False):
        div_is_residual = 1.0
    elif (init_div_is_residual is True):
        div_is_residual = math.sqrt((2 * n_layers))
    elif (isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int)):
        div_is_residual = init_div_is_residual
    elif (isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric()):
        div_is_residual = float(init_div_is_residual)
    else:
        div_is_residual = 1.0  # assignment is dead: the raise below always fires
        raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')
    if (init_div_is_residual is not False):
        if (verbose > 1):
            warnings.warn((f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.'))
    if isinstance(module, nn.Linear):
        # Fused layers (e.g. packed QKV) carry `_fused`; init each split separately.
        if hasattr(module, '_fused'):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if (module.bias is not None):
            torch.nn.init.zeros_(module.bias)
        if ((init_div_is_residual is not False) and getattr(module, '_is_residual', False)):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        # Embedding overrides: explicit std wins over uniform limits wins over init_fn_.
        if (emb_init_std is not None):
            std = emb_init_std
            if (std == 0):
                warnings.warn(f'Embedding layer initialized to 0.')
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if (verbose > 1):
                warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')
        elif (emb_init_uniform_lim is not None):
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if (len(lim) > 2):
                    raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')
                if (lim[0] == lim[1]):
                    warnings.warn(f'Embedding layer initialized to {lim[0]}.')
            else:
                if (lim == 0):
                    warnings.warn(f'Embedding layer initialized to 0.')
                lim = [(- lim), lim]  # scalar -> symmetric interval
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if (verbose > 1):
                warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        # Norm layers: weight -> 1, bias -> 0 (when present).
        if (verbose > 1):
            warnings.warn(f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.')
        if (hasattr(module, 'weight') and (module.weight is not None)):
            torch.nn.init.ones_(module.weight)
        if (hasattr(module, 'bias') and (module.bias is not None)):
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            # Packed in_proj_weight stacks Q, K, V along dim 0; init each d_model slice.
            assert (module.in_proj_weight is not None)
            assert ((module.q_proj_weight is None) and (module.k_proj_weight is None) and (module.v_proj_weight is None))
            assert (d_model is not None)
            _d = d_model
            splits = (0, _d, (2 * _d), (3 * _d))
            for (s, e) in zip(splits[:(- 1)], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert ((module.q_proj_weight is not None) and (module.k_proj_weight is not None) and (module.v_proj_weight is not None))
            assert (module.in_proj_weight is None)
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if (module.in_proj_bias is not None):
            torch.nn.init.zeros_(module.in_proj_bias)
        if (module.bias_k is not None):
            torch.nn.init.zeros_(module.bias_k)
        if (module.bias_v is not None):
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        if ((init_div_is_residual is not False) and getattr(module.out_proj, '_is_residual', False)):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if (module.out_proj.bias is not None):
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Any module that owns parameters directly but matches no branch is an error.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')
def _normal_init_(std, mean=0.0): return partial(torch.nn.init.normal_, mean=mean, std=std)
def _normal_param_init_fn_(module: nn.Module, std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Initialize *module* from N(0, std**2) via ``generic_param_init_fn_``.

    All remaining arguments are forwarded unchanged; extra kwargs are discarded.
    """
    del kwargs
    normal_fn = _normal_init_(std=std)
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.normal_ init fn mean=0.0, std={std}')
    generic_param_init_fn_(module=module, init_fn_=normal_fn, n_layers=n_layers, d_model=d_model, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def baseline_param_init_fn_(module: nn.Module, init_std: float, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """Baseline scheme: plain N(0, init_std**2) initialization.

    Requires an explicit ``init_std`` in the init config; raises otherwise.
    """
    del kwargs
    if init_std is None:
        raise ValueError("You must set model.init_config['init_std'] to a float value to use the default initialization scheme.")
    _normal_param_init_fn_(module=module, std=init_std, n_layers=n_layers, d_model=d_model, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def small_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """"Small init" scheme: normal init with std = sqrt(2 / (5 * d_model))."""
    del kwargs
    small_std = math.sqrt(2 / (5 * d_model))
    _normal_param_init_fn_(module=module, std=small_std, n_layers=n_layers, d_model=d_model, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def neox_param_init_fn_(module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, verbose: int=0, **kwargs):
    """GPT-NeoX-20B initialization (section 2.3.1 of Black et al., 2022).

    Uses the small-init std with a residual divisor of n_layers / sqrt(10).
    See https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
    and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
    """
    del kwargs
    residual_div = n_layers / math.sqrt(10)
    if verbose > 1:
        warnings.warn(f'setting init_div_is_residual to {residual_div}')
    small_param_init_fn_(module=module, n_layers=n_layers, d_model=d_model, init_div_is_residual=residual_div, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def kaiming_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    """Kaiming (He) uniform initialization via ``generic_param_init_fn_``."""
    del kwargs
    if verbose > 1:
        warnings.warn((f'Using nn.init.kaiming_uniform_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}'))
    init_fn = partial(nn.init.kaiming_uniform_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(module=module, init_fn_=init_fn, n_layers=n_layers, d_model=d_model, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def kaiming_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, fan_mode: str='fan_in', init_nonlinearity: str='leaky_relu', verbose: int=0, **kwargs):
    """Kaiming (He) normal initialization via ``generic_param_init_fn_``."""
    del kwargs
    if verbose > 1:
        warnings.warn((f'Using nn.init.kaiming_normal_ init fn with parameters: ' + f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}'))
    init_fn = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity)
    generic_param_init_fn_(module=module, init_fn_=init_fn, n_layers=n_layers, d_model=d_model, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def xavier_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    """Xavier (Glorot) uniform initialization via ``generic_param_init_fn_``."""
    del kwargs
    init_fn = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
    if verbose > 1:
        warnings.warn((f'Using torch.nn.init.xavier_uniform_ init fn with parameters: ' + f'gain={init_gain}'))
    generic_param_init_fn_(module=module, init_fn_=init_fn, n_layers=n_layers, d_model=d_model, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
def xavier_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    """Xavier (Glorot) normal initialization via ``generic_param_init_fn_``.

    Args mirror the other param-init functions; ``init_gain`` is forwarded to
    ``torch.nn.init.xavier_normal_``.
    """
    # Consistency fix: every sibling init fn explicitly discards unused **kwargs;
    # this one was missing the `del kwargs`.
    del kwargs
    xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
    if (verbose > 1):
        warnings.warn((f'Using torch.nn.init.xavier_normal_ init fn with parameters: ' + f'gain={init_gain}'))
    generic_param_init_fn_(module=module, init_fn_=xavier_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose)
@torch.inference_mode()
def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2):
    """Adapted from fastchat/serve/model_worker.py::generate_stream

    Greedy/temperature sampling loop that yields the decoded text so far every
    ``stream_interval`` steps (and on stop/EOS). ``params`` keys: 'prompt',
    optional 'temperature', 'max_new_tokens', and 'stop' (a stop string).
    """
    prompt = params['prompt']
    l_prompt = len(prompt)
    temperature = float(params.get('temperature', 1.0))
    max_new_tokens = int(params.get('max_new_tokens', 256))
    stop_str = params.get('stop', None)
    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)
    # Reserve room for the new tokens (plus a small margin) in the context window.
    max_src_len = (context_len - max_new_tokens) - 8
    input_ids = input_ids[-max_src_len:]
    for i in range(max_new_tokens):
        if i == 0:
            # Prime the KV cache with the full (truncated) prompt.
            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
            logits = out.logits
            past_key_values = out.past_key_values
        else:
            attention_mask = torch.ones(1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(input_ids=torch.as_tensor([[token]], device=device), use_cache=True, attention_mask=attention_mask, past_key_values=past_key_values)
            logits = out.logits
            past_key_values = out.past_key_values
        last_token_logits = logits[0][-1]
        if temperature < 0.0001:
            # Effectively greedy decoding.
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))
        output_ids.append(token)
        stopped = (token == tokenizer.eos_token_id)
        if (i % stream_interval) == 0 or i == (max_new_tokens - 1) or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            # BUGFIX: 'stop' defaults to None and str.rfind(None, ...) raises
            # TypeError; only search for a stop string when one was provided.
            if stop_str:
                pos = output.rfind(stop_str, l_prompt)
                if pos != -1:
                    output = output[:pos]
                    stopped = True
            yield output
        if stopped:
            break
    del past_key_values
def main(args):
    """Interactive CLI chat: load model/tokenizer per args, then stream replies.

    Reads user turns from stdin in a loop, builds the prompt from the selected
    conversation template, and prints the assistant's reply word-by-word as
    generate_stream yields partial outputs. An empty line (or EOF) exits.
    """
    model_name = args.model_name
    num_gpus = args.num_gpus
    # Choose load kwargs per device: fp16 on cuda, optionally sharded across GPUs.
    if (args.device == 'cuda'):
        kwargs = {'torch_dtype': torch.float16}
        if (num_gpus == 'auto'):
            kwargs['device_map'] = 'auto'
        else:
            num_gpus = int(num_gpus)
            if (num_gpus != 1):
                # Cap per-GPU memory so HF spreads the model across all GPUs.
                kwargs.update({'device_map': 'auto', 'max_memory': {i: '13GiB' for i in range(num_gpus)}})
    elif (args.device == 'cpu'):
        kwargs = {}
    else:
        raise ValueError(f'Invalid device: {args.device}')
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True, **kwargs)
    if ((args.device == 'cuda') and (num_gpus == 1)):
        model.cuda()
    conv = conv_templates[args.conv_template].copy()
    while True:
        try:
            inp = input(f'{conv.roles[0]}: ')
        except EOFError:
            inp = ''
        if (not inp):
            print('exit...')
            break
        conv.append_message(conv.roles[0], inp)
        # Placeholder turn for the assistant; filled in after generation.
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        params = {'model': model_name, 'prompt': prompt, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'stop': (conv.sep if (conv.sep_style == SeparatorStyle.SINGLE) else conv.sep2)}
        print(f'{conv.roles[1]}: ', end='', flush=True)
        pre = 0
        # Print only fully completed words; the last (possibly partial) word is
        # held back until the next chunk or the end of the stream.
        for outputs in generate_stream(tokenizer, model, params, args.device):
            outputs = outputs[(len(prompt) + 1):].strip()
            outputs = outputs.split(' ')
            now = len(outputs)
            if ((now - 1) > pre):
                print(' '.join(outputs[pre:(now - 1)]), end=' ', flush=True)
                pre = (now - 1)
        print(' '.join(outputs[pre:]), flush=True)
        # Record the final reply in the conversation history.
        conv.messages[(- 1)][(- 1)] = ' '.join(outputs)
        if args.debug:
            print('\n', {'prompt': prompt, 'outputs': outputs}, '\n')
class DispatchMethod(Enum):
    """Strategy for routing a request to one of the registered workers."""
    LOTTERY = auto()
    SHORTEST_QUEUE = auto()

    @classmethod
    def from_str(cls, name):
        """Parse a config string ('lottery' or 'shortest_queue') into a member."""
        if name == 'lottery':
            return cls.LOTTERY
        if name == 'shortest_queue':
            return cls.SHORTEST_QUEUE
        raise ValueError(f'Invalid dispatch method')
@dataclasses.dataclass
class WorkerInfo():
    """Controller-side record for a single registered model worker."""
    # Names of the models this worker serves.
    model_names: List[str]
    # Relative throughput; used as a lottery weight and queue-length normalizer.
    speed: int
    queue_length: int
    # Whether the controller should expire this worker when heartbeats stop.
    check_heart_beat: bool
    # NOTE(review): annotated str, but the Controller assigns time.time() (a
    # float) here -- the annotation looks wrong; confirm before relying on it.
    last_heart_beat: str
def heart_beat_controller(controller):
    """Background-thread loop: periodically purge workers with expired heartbeats.

    Runs forever; intended to be launched in a thread by Controller.__init__.
    """
    while True:
        time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
        controller.remove_stable_workers_by_expiration()
class Controller():
    """Registry and request router for distributed model workers.

    Workers register over HTTP; a background thread removes workers whose
    heartbeat has expired. Requests are dispatched by lottery (speed-weighted
    random choice) or shortest normalized queue.
    """

    def __init__(self, dispatch_method: str):
        # Maps worker address -> WorkerInfo.
        self.worker_info = {}
        self.dispatch_method = DispatchMethod.from_str(dispatch_method)
        # Background pruner; loops forever (see heart_beat_controller).
        self.heart_beat_thread = threading.Thread(target=heart_beat_controller, args=(self,))
        self.heart_beat_thread.start()
        logger.info('Init controller')

    def register_worker(self, worker_name: str, check_heart_beat: bool, worker_status: dict):
        """Add or refresh a worker entry; returns False if its status can't be fetched."""
        if (worker_name not in self.worker_info):
            logger.info(f'Register a new worker: {worker_name}')
        else:
            logger.info(f'Register an existing worker: {worker_name}')
        if (not worker_status):
            worker_status = self.get_worker_status(worker_name)
        if (not worker_status):
            return False
        self.worker_info[worker_name] = WorkerInfo(worker_status['model_names'], worker_status['speed'], worker_status['queue_length'], check_heart_beat, time.time())
        logger.info(f'Register done: {worker_name}, {worker_status}')
        return True

    def get_worker_status(self, worker_name: str):
        """POST to the worker's status endpoint; return its JSON dict, or None on failure."""
        try:
            r = requests.post((worker_name + '/worker_get_status'), timeout=5)
        except requests.exceptions.RequestException as e:
            logger.error(f'Get status fails: {worker_name}, {e}')
            return None
        if (r.status_code != 200):
            logger.error(f'Get status fails: {worker_name}, {r}')
            return None
        return r.json()

    def remove_worker(self, worker_name: str):
        # Unconditional removal; raises KeyError for unknown workers.
        del self.worker_info[worker_name]

    def refresh_all_workers(self):
        """Re-register every known worker, dropping those that no longer respond."""
        old_info = dict(self.worker_info)
        self.worker_info = {}
        for (w_name, w_info) in old_info.items():
            if (not self.register_worker(w_name, w_info.check_heart_beat, None)):
                logger.info(f'Remove stale worker: {w_name}')

    def list_models(self):
        """Return the de-duplicated list of model names served by any worker."""
        model_names = set()
        for (w_name, w_info) in self.worker_info.items():
            model_names.update(w_info.model_names)
        return list(model_names)

    def get_worker_address(self, model_name: str):
        """Pick a worker address for model_name per the dispatch method; '' if none."""
        if (self.dispatch_method == DispatchMethod.LOTTERY):
            worker_names = []
            worker_speeds = []
            for (w_name, w_info) in self.worker_info.items():
                if (model_name in w_info.model_names):
                    worker_names.append(w_name)
                    worker_speeds.append(w_info.speed)
            worker_speeds = np.array(worker_speeds, dtype=np.float32)
            norm = np.sum(worker_speeds)
            if (norm < 0.0001):
                return ''
            worker_speeds = (worker_speeds / norm)
            if True:
                # Fast path: one speed-weighted draw with no liveness check.
                # NOTE(review): this `if True` makes the liveness-checking retry
                # loop below dead code -- presumably disabled deliberately.
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]
                return worker_name
            while True:
                # Dead code (see above): draw, verify liveness, and retry with
                # the failed worker's weight zeroed out.
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]
                if self.get_worker_status(worker_name):
                    break
                else:
                    self.remove_worker(worker_name)
                    worker_speeds[pt] = 0
                    norm = np.sum(worker_speeds)
                    if (norm < 0.0001):
                        return ''
                    worker_speeds = (worker_speeds / norm)
                    continue
            return worker_name
        elif (self.dispatch_method == DispatchMethod.SHORTEST_QUEUE):
            worker_names = []
            worker_qlen = []
            for (w_name, w_info) in self.worker_info.items():
                if (model_name in w_info.model_names):
                    worker_names.append(w_name)
                    # Queue length normalized by speed, so faster workers can hold more.
                    worker_qlen.append((w_info.queue_length / w_info.speed))
            if (len(worker_names) == 0):
                return ''
            min_index = np.argmin(worker_qlen)
            w_name = worker_names[min_index]
            # Optimistically count the dispatched request against the worker's queue.
            self.worker_info[w_name].queue_length += 1
            logger.info(f'names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}')
            return w_name
        else:
            raise ValueError(f'Invalid dispatch method: {self.dispatch_method}')

    def receive_heart_beat(self, worker_name: str, queue_length: int):
        """Record a worker heartbeat; returns False for unregistered workers."""
        if (worker_name not in self.worker_info):
            logger.info(f'Receive unknown heart beat. {worker_name}')
            return False
        self.worker_info[worker_name].queue_length = queue_length
        self.worker_info[worker_name].last_heart_beat = time.time()
        logger.info(f'Receive heart beat. {worker_name}')
        return True

    def remove_stable_workers_by_expiration(self):
        # Name kept for API stability; "stable" is presumably a typo for "stale".
        expire = (time.time() - CONTROLLER_HEART_BEAT_EXPIRATION)
        to_delete = []
        for (worker_name, w_info) in self.worker_info.items():
            if (w_info.check_heart_beat and (w_info.last_heart_beat < expire)):
                to_delete.append(worker_name)
        for worker_name in to_delete:
            self.remove_worker(worker_name)

    def worker_api_generate_stream(self, params):
        """Proxy a streaming generate request to a worker; yields b'\\x00'-delimited chunks."""
        worker_addr = self.get_worker_address(params['model'])
        if (not worker_addr):
            logger.info(f"no worker: {params['model']}")
            ret = {'text': server_error_msg, 'error_code': 2}
            # NOTE(review): no return after this yield -- control falls through to
            # the request below with an empty address, which then raises a
            # RequestException and yields a second error chunk. Confirm intended.
            (yield (json.dumps(ret).encode() + b'\x00'))
        try:
            response = requests.post((worker_addr + '/worker_generate_stream'), json=params, stream=True, timeout=5)
            for chunk in response.iter_lines(decode_unicode=False, delimiter=b'\x00'):
                if chunk:
                    (yield (chunk + b'\x00'))
        except requests.exceptions.RequestException as e:
            logger.info(f'worker timeout: {worker_addr}')
            ret = {'text': server_error_msg, 'error_code': 3}
            (yield (json.dumps(ret).encode() + b'\x00'))

    def worker_api_get_status(self):
        """Aggregate model names, speed, and queue length across all live workers."""
        model_names = set()
        speed = 0
        queue_length = 0
        for w_name in self.worker_info:
            worker_status = self.get_worker_status(w_name)
            if (worker_status is not None):
                model_names.update(worker_status['model_names'])
                speed += worker_status['speed']
                queue_length += worker_status['queue_length']
        return {'model_names': list(model_names), 'speed': speed, 'queue_length': queue_length}
class _Keywords(Enum):
    """Internal sentinel values used by the gradio ``update`` machinery.

    ``NO_VALUE`` distinguishes "caller supplied no value" from an explicit
    ``None`` (see ``Chatbot.update`` below, which defaults to it).
    ``FINISHED_ITERATING`` presumably marks an exhausted generator -- not
    referenced in this chunk; confirm against the rest of the file.
    """
    NO_VALUE = 'NO_VALUE'
    FINISHED_ITERATING = 'FINISHED_ITERATING'
@document('style')
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
    """
    Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
    Preprocessing: this component does *not* accept input.
    Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.

    Demos: chatbot_simple, chatbot_multimodal
    """

    def __init__(self, value: ((List[Tuple[((str | None), (str | None))]] | Callable) | None)=None, color_map: (Dict[(str, str)] | None)=None, *, label: (str | None)=None, every: (float | None)=None, show_label: bool=True, visible: bool=True, elem_id: (str | None)=None, elem_classes: ((List[str] | str) | None)=None, **kwargs):
        """
        Parameters:
            value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
            label: component name in interface.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            show_label: if True, will display label.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
        """
        if color_map is not None:
            warnings.warn("The 'color_map' parameter has been deprecated.")
        # Markdown renderer used for string messages in postprocess().
        self.md = Markdown(extras=['fenced-code-blocks', 'tables', 'break-on-newline'])
        self.select: EventListenerMethod
        """
        Event listener for when the user selects message from Chatbot.
        Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
        See EventData documentation on how to use this event data.
        """
        IOComponent.__init__(self, label=label, every=every, show_label=show_label, visible=visible, elem_id=elem_id, elem_classes=elem_classes, value=value, **kwargs)

    def get_config(self):
        # Component config sent to the frontend; extends the IOComponent base config.
        return {'value': self.value, 'selectable': self.selectable, **IOComponent.get_config(self)}

    @staticmethod
    def update(value: ((Any | Literal[_Keywords.NO_VALUE]) | None)=_Keywords.NO_VALUE, label: (str | None)=None, show_label: (bool | None)=None, visible: (bool | None)=None):
        # Build an "update" payload; NO_VALUE means "leave the value unchanged".
        updated_config = {'label': label, 'show_label': show_label, 'visible': visible, 'value': value, '__type__': 'update'}
        return updated_config

    def _process_chat_messages(self, chat_message: ((((str | Tuple) | List) | Dict) | None)) -> ((str | Dict) | None):
        """Normalize one chat message: None passes through, (filepath, [alt]) tuples
        become media dicts, dicts pass through, and strings are rendered as Markdown."""
        if chat_message is None:
            return None
        elif isinstance(chat_message, (tuple, list)):
            mime_type = processing_utils.get_mimetype(chat_message[0])
            return {'name': chat_message[0], 'mime_type': mime_type, 'alt_text': (chat_message[1] if (len(chat_message) > 1) else None), 'data': None, 'is_file': True}
        elif isinstance(chat_message, dict):
            return chat_message
        elif isinstance(chat_message, str):
            return str(self.md.convert(chat_message))
        else:
            raise ValueError(f'Invalid message for Chatbot component: {chat_message}')

    def postprocess(self, y: List[Tuple[(((((str | Tuple) | List) | Dict) | None), ((((str | Tuple) | List) | Dict) | None))]]) -> List[Tuple[(((str | Dict) | None), ((str | Dict) | None))]]:
        """
        Parameters:
            y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
        Returns:
            List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
        """
        if y is None:
            return []
        processed_messages = []
        for message_pair in y:
            assert isinstance(message_pair, (tuple, list)), f'Expected a list of lists or list of tuples. Received: {message_pair}'
            assert (len(message_pair) == 2), f'Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}'
            # Fix: the docstring allows None messages, but the original code
            # concatenated message_pair[0] into an HTML string unconditionally,
            # raising TypeError for None.  Pass None through unchanged instead.
            if message_pair[0] is None:
                user_message = None
            else:
                # User message is shown verbatim inside <pre> (no Markdown rendering).
                user_message = (('<pre style="font-family: var(--font)">' + message_pair[0]) + '</pre>')
            processed_messages.append((user_message, self._process_chat_messages(message_pair[1])))
        return processed_messages

    def style(self, height: (int | None)=None, **kwargs):
        """
        This method can be used to change the appearance of the Chatbot component.
        """
        if height is not None:
            self._style['height'] = height
        if kwargs.get('color_map') is not None:
            warnings.warn("The 'color_map' parameter has been deprecated.")
        Component.style(self, **kwargs)
        return self
def get_conv_log_filename():
    """Return today's conversation-log file path inside ``LOGDIR``."""
    now = datetime.datetime.now()
    return os.path.join(LOGDIR, f'{now.year}-{now.month:02d}-{now.day:02d}-conv.json')
def get_model_list():
    """Ask the controller to refresh its workers, then return the model list.

    Models are sorted by the module-level ``priority`` mapping (falling back
    to the model name itself for unlisted models).
    """
    refresh = requests.post(args.controller_url + '/refresh_all_workers')
    assert refresh.status_code == 200
    listing = requests.post(args.controller_url + '/list_models')
    models = listing.json()['models']
    models.sort(key=lambda name: priority.get(name, name))
    logger.info(f'Models: {models}')
    return models
def load_demo(url_params, request: gr.Request):
    """Initialise a client session ("once" model-list mode).

    Returns a fresh conversation state plus gradio updates that reveal the
    main widgets.  If ``url_params`` carries a known ``model``, pre-select it.
    """
    logger.info(f'load_demo. ip: {request.client.host}. params: {url_params}')
    dropdown_update = gr.Dropdown.update(visible=True)
    if ('model' in url_params):
        model = url_params['model']
        if (model in models):  # `models` is the module-level list fetched at startup
            dropdown_update = gr.Dropdown.update(value=model, visible=True)
    state = default_conversation.copy()
    return (state, dropdown_update, gr.Chatbot.update(visible=True), gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Row.update(visible=True), gr.Accordion.update(visible=True))
def load_demo_refresh_model_list(request: gr.Request):
    """Initialise a client session, re-querying the controller for models ("reload" mode)."""
    logger.info(f'load_demo. ip: {request.client.host}')
    models = get_model_list()
    state = default_conversation.copy()
    default_model = models[0] if models else ''
    dropdown_update = gr.Dropdown.update(choices=models, value=default_model)
    widget_updates = (
        gr.Chatbot.update(visible=True),
        gr.Textbox.update(visible=True),
        gr.Button.update(visible=True),
        gr.Row.update(visible=True),
        gr.Accordion.update(visible=True),
    )
    return (state, dropdown_update) + widget_updates
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
    """Append one JSON-line vote record for the last response to today's log.

    vote_type: 'upvote', 'downvote' or 'flag' (see the wrappers below).
    """
    with open(get_conv_log_filename(), 'a') as fout:
        data = {'tstamp': round(time.time(), 4), 'type': vote_type, 'model': model_selector, 'state': state.dict(), 'ip': request.client.host}
        fout.write((json.dumps(data) + '\n'))
def upvote_last_response(state, model_selector, request: gr.Request):
    """Record an upvote; clear the textbox and disable the three vote buttons."""
    logger.info(f'upvote. ip: {request.client.host}')
    vote_last_response(state, 'upvote', model_selector, request)
    return ('', disable_btn, disable_btn, disable_btn)
def downvote_last_response(state, model_selector, request: gr.Request):
    """Record a downvote; clear the textbox and disable the three vote buttons."""
    logger.info(f'downvote. ip: {request.client.host}')
    vote_last_response(state, 'downvote', model_selector, request)
    return ('', disable_btn, disable_btn, disable_btn)
def flag_last_response(state, model_selector, request: gr.Request):
    """Flag the last response; clear the textbox and disable the three vote buttons."""
    logger.info(f'flag. ip: {request.client.host}')
    vote_last_response(state, 'flag', model_selector, request)
    return ('', disable_btn, disable_btn, disable_btn)
def regenerate(state, image_process_mode, request: gr.Request):
    """Drop the last model reply so the previous user turn gets re-answered by http_bot."""
    logger.info(f'regenerate. ip: {request.client.host}')
    # Clear the pending assistant message.
    state.messages[-1][-1] = None
    last_user_msg = state.messages[-2]
    # Multimodal user messages are (text, image, process_mode); refresh the mode.
    # Exact-type check on purpose (mirrors original), not isinstance().
    if type(last_user_msg[1]) in (tuple, list):
        last_user_msg[1] = (*last_user_msg[1][:2], image_process_mode)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), '', None, *([disable_btn] * 5))
def clear_history(request: gr.Request):
    """Reset the conversation to the default template and clear all input widgets."""
    logger.info(f'clear_history. ip: {request.client.host}')
    state = default_conversation.copy()
    return (state, state.to_gradio_chatbot(), '', None, *([disable_btn] * 5))
def add_text(state, text, image, image_process_mode, request: gr.Request):
    """Append the user's message (and optional image) to the conversation state.

    Returns (state, chatbot, textbox, imagebox, *5 button updates).  Sets
    ``state.skip_next`` so http_bot can short-circuit rejected inputs
    (empty text or moderated content).
    """
    logger.info(f'add_text. ip: {request.client.host}. len: {len(text)}')
    if ((len(text) <= 0) and (image is None)):
        # Nothing to send: leave the UI untouched.
        state.skip_next = True
        return ((state, state.to_gradio_chatbot(), '', None) + ((no_change_btn,) * 5))
    if args.moderate:
        flagged = violates_moderation(text)
        if flagged:
            state.skip_next = True
            return ((state, state.to_gradio_chatbot(), moderation_msg, None) + ((no_change_btn,) * 5))
    text = text[:1536]  # hard cap on prompt text length
    if (image is not None):
        text = text[:1200]  # tighter cap: leave room for image placeholder tokens
        if ('<image>' not in text):
            text = (text + '\n<image>')
        text = (text, image, image_process_mode)
        # NOTE(review): uploading an image resets the whole conversation, so only
        # single-image, fresh conversations are supported here -- confirm intent.
        state = default_conversation.copy()
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return ((state, state.to_gradio_chatbot(), '', None) + ((disable_btn,) * 5))
def post_process_code(code):
    """Undo LaTeX-style underscore escaping (``\\_`` -> ``_``) inside fenced code blocks.

    Only applies when the ``\\n\`\`\``` fences are balanced (odd segment count);
    otherwise the text is returned unchanged.
    """
    fence = '\n```'
    if fence not in code:
        return code
    segments = code.split(fence)
    # Balanced fences => odd number of segments; odd indices are code bodies.
    if len(segments) % 2 == 1:
        for idx in range(1, len(segments), 2):
            segments[idx] = segments[idx].replace('\\_', '_')
        code = fence.join(segments)
    return code
def http_bot(state, model_selector, temperature, max_new_tokens, request: gr.Request):
    """Stream a model response from a worker into the gradio chatbot.

    Generator: yields (state, chatbot, *5 button updates) tuples as streamed
    chunks arrive from the worker, and finally logs the finished exchange.
    """
    logger.info(f'http_bot. ip: {request.client.host}')
    start_tstamp = time.time()
    model_name = model_selector

    if state.skip_next:
        # add_text rejected the input (empty / moderated); do nothing.
        (yield ((state, state.to_gradio_chatbot()) + ((no_change_btn,) * 5)))
        return

    if (len(state.messages) == (state.offset + 2)):
        # First round of this conversation: pick the prompt template by model name.
        if ('llava' in model_name.lower()):
            if ('v1' in model_name.lower()):
                template_name = 'llava_v1'
            elif ('mpt' in model_name.lower()):
                template_name = 'mpt_multimodal'
            else:
                template_name = 'multimodal'
        elif ('mpt' in model_name):
            template_name = 'mpt_text'
        elif ('koala' in model_name):
            template_name = 'bair_v1'
        elif ('v1' in model_name):
            template_name = 'vicuna_v1_1'
        else:
            template_name = 'v1'
        new_state = conv_templates[template_name].copy()
        new_state.append_message(new_state.roles[0], state.messages[(- 2)][1])
        new_state.append_message(new_state.roles[1], None)
        state = new_state

    # Ask the controller which worker serves this model.
    controller_url = args.controller_url
    ret = requests.post((controller_url + '/get_worker_address'), json={'model': model_name})
    worker_addr = ret.json()['address']
    logger.info(f'model_name: {model_name}, worker_addr: {worker_addr}')

    if (worker_addr == ''):
        # No worker available: show the error, re-enable regenerate/clear.
        state.messages[(- 1)][(- 1)] = server_error_msg
        (yield (state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn))
        return

    prompt = state.get_prompt()

    # Persist each image once, keyed by its MD5 hash.
    all_images = state.get_images(return_pil=True)
    all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
    for (image, hash) in zip(all_images, all_image_hash):
        t = datetime.datetime.now()
        filename = os.path.join(LOGDIR, 'serve_images', f'{t.year}-{t.month:02d}-{t.day:02d}', f'{hash}.jpg')
        if (not os.path.isfile(filename)):
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            image.save(filename)

    # Build the request payload; images are summarised for logging, then
    # replaced with the real (base64) payload before sending.
    pload = {'model': model_name, 'prompt': prompt, 'temperature': float(temperature), 'max_new_tokens': min(int(max_new_tokens), 1536), 'stop': (state.sep if (state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT]) else state.sep2), 'images': f'List of {len(state.get_images())} images: {all_image_hash}'}
    logger.info(f'''==== request ====
{pload}''')
    pload['images'] = state.get_images()

    state.messages[(- 1)][(- 1)] = 'โ–Œ'  # streaming cursor placeholder
    (yield ((state, state.to_gradio_chatbot()) + ((disable_btn,) * 5)))

    try:
        # Stream the output (chunks are b'\0'-delimited JSON).
        response = requests.post((worker_addr + '/worker_generate_stream'), headers=headers, json=pload, stream=True, timeout=10)
        for chunk in response.iter_lines(decode_unicode=False, delimiter=b'\x00'):
            if chunk:
                data = json.loads(chunk.decode())
                if (data['error_code'] == 0):
                    # Worker echoes the prompt; strip it before display.
                    output = data['text'][len(prompt):].strip()
                    output = post_process_code(output)
                    state.messages[(- 1)][(- 1)] = (output + 'โ–Œ')
                    (yield ((state, state.to_gradio_chatbot()) + ((disable_btn,) * 5)))
                else:
                    output = (data['text'] + f" (error_code: {data['error_code']})")
                    state.messages[(- 1)][(- 1)] = output
                    (yield ((state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)))
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:
        state.messages[(- 1)][(- 1)] = server_error_msg
        (yield ((state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)))
        return

    # Drop the trailing streaming-cursor character and re-enable all buttons.
    state.messages[(- 1)][(- 1)] = state.messages[(- 1)][(- 1)][:(- 1)]
    (yield ((state, state.to_gradio_chatbot()) + ((enable_btn,) * 5)))

    finish_tstamp = time.time()
    logger.info(f'{output}')

    with open(get_conv_log_filename(), 'a') as fout:
        # Fix: 'finish' previously recorded round(start_tstamp, 4), so every
        # logged exchange appeared to take zero time.
        data = {'tstamp': round(finish_tstamp, 4), 'type': 'chat', 'model': model_name, 'start': round(start_tstamp, 4), 'finish': round(finish_tstamp, 4), 'state': state.dict(), 'images': all_image_hash, 'ip': request.client.host}
        fout.write((json.dumps(data) + '\n'))
def build_demo(embed_mode):
    """Construct the gradio Blocks UI for the LLaVA chat demo.

    embed_mode: when True, hide the title / terms-of-service markdown so the
    demo can be embedded in another page.  Returns the Blocks object.
    """
    textbox = gr.Textbox(show_label=False, placeholder='Enter text and press ENTER', visible=False).style(container=False)
    with gr.Blocks(title='LLaVA', theme=gr.themes.Base(), css=css) as demo:
        state = gr.State()  # per-session conversation state

        if (not embed_mode):
            gr.Markdown(title_markdown)

        with gr.Row():
            # Left column: model selection, image upload, examples, parameters.
            with gr.Column(scale=3):
                with gr.Row(elem_id='model_selector_row'):
                    model_selector = gr.Dropdown(choices=models, value=(models[0] if (len(models) > 0) else ''), interactive=True, show_label=False).style(container=False)

                imagebox = gr.Image(type='pil')
                image_process_mode = gr.Radio(['Crop', 'Resize', 'Pad'], value='Crop', label='Preprocess for non-square image')

                cur_dir = os.path.dirname(os.path.abspath(__file__))
                gr.Examples(examples=[[f'{cur_dir}/examples/extreme_ironing.jpg', 'What is unusual about this image?'], [f'{cur_dir}/examples/waterview.jpg', 'What are the things I should be cautious about when I visit here?']], inputs=[imagebox, textbox])

                with gr.Accordion('Parameters', open=False, visible=False) as parameter_row:
                    temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label='Temperature')
                    max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label='Max output tokens')

            # Right column: chatbot display, textbox, action buttons.
            with gr.Column(scale=6):
                chatbot = grChatbot(elem_id='chatbot', label='LLaVA Chatbot', visible=False).style(height=550)
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox.render()
                    with gr.Column(scale=1, min_width=60):
                        submit_btn = gr.Button(value='Submit', visible=False)
                with gr.Row(visible=False) as button_row:
                    upvote_btn = gr.Button(value='๐Ÿ‘  Upvote', interactive=False)
                    downvote_btn = gr.Button(value='๐Ÿ‘Ž  Downvote', interactive=False)
                    flag_btn = gr.Button(value='โš ๏ธ  Flag', interactive=False)
                    regenerate_btn = gr.Button(value='๐Ÿ”„  Regenerate', interactive=False)
                    clear_btn = gr.Button(value='๐Ÿ—‘๏ธ  Clear history', interactive=False)

        if (not embed_mode):
            gr.Markdown(tos_markdown)
            gr.Markdown(learn_more_markdown)
        url_params = gr.JSON(visible=False)  # receives query-string params via _js

        # Event wiring.  btn_list order must match the button tuples returned
        # by the handlers (upvote, downvote, flag, regenerate, clear).
        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        upvote_btn.click(upvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
        downvote_btn.click(downvote_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
        flag_btn.click(flag_last_response, [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
        regenerate_btn.click(regenerate, [state, image_process_mode], ([state, chatbot, textbox, imagebox] + btn_list)).then(http_bot, [state, model_selector, temperature, max_output_tokens], ([state, chatbot] + btn_list))
        clear_btn.click(clear_history, None, ([state, chatbot, textbox, imagebox] + btn_list))
        textbox.submit(add_text, [state, textbox, imagebox, image_process_mode], ([state, chatbot, textbox, imagebox] + btn_list)).then(http_bot, [state, model_selector, temperature, max_output_tokens], ([state, chatbot] + btn_list))
        submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode], ([state, chatbot, textbox, imagebox] + btn_list)).then(http_bot, [state, model_selector, temperature, max_output_tokens], ([state, chatbot] + btn_list))

        # Session initialisation depends on whether the model list is fetched
        # once at startup or re-fetched on every page load.
        if (args.model_list_mode == 'once'):
            demo.load(load_demo, [url_params], [state, model_selector, chatbot, textbox, submit_btn, button_row, parameter_row], _js=get_window_url_params)
        elif (args.model_list_mode == 'reload'):
            demo.load(load_demo_refresh_model_list, None, [state, model_selector, chatbot, textbox, submit_btn, button_row, parameter_row])
        else:
            raise ValueError(f'Unknown model list mode: {args.model_list_mode}')
    return demo
def heart_beat_worker(controller):
    """Background-thread loop: periodically push this worker's heart beat to the controller."""
    while True:
        time.sleep(WORKER_HEART_BEAT_INTERVAL)
        controller.send_heart_beat()
def load_model(model_path, model_name, num_gpus):
    """Load tokenizer + model (+ CLIP image processor for LLaVA models).

    num_gpus > 1 shards the model via accelerate's device_map='auto' with a
    13GiB-per-GPU cap; num_gpus == 1 moves the whole model to cuda at the end.
    Returns (tokenizer, model, image_processor, context_len); image_processor
    is None for non-LLaVA models.
    """
    if (num_gpus == 1):
        kwargs = {}
    else:
        kwargs = {'device_map': 'auto', 'max_memory': {i: '13GiB' for i in range(num_gpus)}}

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # Model class is picked from the (case-insensitive) model name.
    if ('llava' in model_name.lower()):
        if ('mpt' in model_name.lower()):
            model = LlavaMPTForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
        else:
            model = LlavaLlamaForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
    elif ('mpt' in model_name.lower()):
        model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
    else:
        model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)

    image_processor = None

    if ('llava' in model_name.lower()):
        # Multimodal setup: CLIP vision tower + special image tokens.
        from transformers import CLIPImageProcessor, CLIPVisionModel
        image_processor = CLIPImageProcessor.from_pretrained(model.config.mm_vision_tower, torch_dtype=torch.float16)

        mm_use_im_start_end = getattr(model.config, 'mm_use_im_start_end', False)
        tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)

        vision_tower = model.get_model().vision_tower[0]
        if (vision_tower.device.type == 'meta'):
            # Tower weights were not materialised during sharded loading; reload them.
            vision_tower = CLIPVisionModel.from_pretrained(vision_tower.config._name_or_path, torch_dtype=torch.float16, low_cpu_mem_usage=True).cuda()
            model.get_model().vision_tower[0] = vision_tower
        else:
            vision_tower.to(device='cuda', dtype=torch.float16)
        # Propagate the image-token ids into the vision config for the forward pass.
        vision_config = vision_tower.config
        vision_config.im_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_IMAGE_PATCH_TOKEN])[0]
        vision_config.use_im_start_end = mm_use_im_start_end
        if mm_use_im_start_end:
            (vision_config.im_start_token, vision_config.im_end_token) = tokenizer.convert_tokens_to_ids([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])

    if (num_gpus == 1):
        model.cuda()

    if hasattr(model.config, 'max_sequence_length'):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048  # default context window when the config omits it

    return (tokenizer, model, image_processor, context_len)
class ModelWorker():
    """Hosts one model, streams generations over HTTP, and keeps itself
    registered with the controller via a heart-beat thread."""

    def __init__(self, controller_addr, worker_addr, worker_id, no_register, model_path, model_name, keep_aspect_ratio, num_gpus):
        self.controller_addr = controller_addr
        self.worker_addr = worker_addr
        self.worker_id = worker_id
        if model_path.endswith('/'):
            model_path = model_path[:(- 1)]
        if (model_name is None):
            # Derive a display name from the path; keep the parent directory
            # for "checkpoint-NNN" folders so intermediate names stay unique.
            model_paths = model_path.split('/')
            if model_paths[(- 1)].startswith('checkpoint-'):
                self.model_name = ((model_paths[(- 2)] + '_') + model_paths[(- 1)])
            else:
                self.model_name = model_paths[(- 1)]
        else:
            self.model_name = model_name

        logger.info(f'Loading the model {self.model_name} on worker {worker_id} ...')
        self.keep_aspect_ratio = keep_aspect_ratio
        (self.tokenizer, self.model, self.image_processor, self.context_len) = load_model(model_path, self.model_name, num_gpus)
        # Image handling is enabled purely from the path name.
        self.is_multimodal = ('llava' in model_path.lower())

        if (not no_register):
            self.register_to_controller()
            self.heart_beat_thread = threading.Thread(target=heart_beat_worker, args=(self,))
            self.heart_beat_thread.start()

    def register_to_controller(self):
        """Announce this worker and its current status to the controller."""
        logger.info('Register to controller')
        url = (self.controller_addr + '/register_worker')
        data = {'worker_name': self.worker_addr, 'check_heart_beat': True, 'worker_status': self.get_status()}
        r = requests.post(url, json=data)
        assert (r.status_code == 200)

    def send_heart_beat(self):
        """Report the queue length to the controller; re-register if it forgot us.

        Retries forever (5s back-off) on connection errors.
        """
        logger.info(f'Send heart beat. Models: {[self.model_name]}. Semaphore: {pretty_print_semaphore(model_semaphore)}. global_counter: {global_counter}')
        url = (self.controller_addr + '/receive_heart_beat')
        while True:
            try:
                ret = requests.post(url, json={'worker_name': self.worker_addr, 'queue_length': self.get_queue_length()}, timeout=5)
                exist = ret.json()['exist']
                break
            except requests.exceptions.RequestException as e:
                logger.error(f'heart beat error: {e}')
            time.sleep(5)
        if (not exist):
            self.register_to_controller()

    def get_queue_length(self):
        # Pending requests = permits currently held + callers blocked waiting.
        # Reaches into Semaphore internals (_value, _waiters) -- CPython-specific.
        if (model_semaphore is None):
            return 0
        else:
            return ((args.limit_model_concurrency - model_semaphore._value) + (len(model_semaphore._waiters) if (model_semaphore._waiters is not None) else 0))

    def get_status(self):
        return {'model_names': [self.model_name], 'speed': 1, 'queue_length': self.get_queue_length()}

    @torch.inference_mode()
    def generate_stream(self, params):
        """Token-by-token generation loop; yields b'\\0'-terminated JSON chunks.

        params keys used: 'prompt', optional 'images' (base64 list), optional
        'temperature', 'max_new_tokens', 'stop'.
        """
        (tokenizer, model, image_processor) = (self.tokenizer, self.model, self.image_processor)

        prompt = params['prompt']
        ori_prompt = prompt
        images = params.get('images', None)
        if ((images is not None) and (len(images) > 0) and self.is_multimodal):
            from PIL import Image
            from io import BytesIO
            import base64
            assert (type(images) is list)
            if (len(images) > 0):
                # Decode base64 images; each <image> token in the prompt is
                # expanded into one patch token per 14x14 ViT patch.
                images = [Image.open(BytesIO(base64.b64decode(image))) for image in images]
                assert (len(images) == prompt.count(DEFAULT_IMAGE_TOKEN)), 'Number of images does not match number of <image> tokens in prompt'

                if self.keep_aspect_ratio:
                    new_images = []
                    for (image_idx, image) in enumerate(images):
                        (max_hw, min_hw) = (max(image.size), min(image.size))
                        aspect_ratio = (max_hw / min_hw)
                        (max_len, min_len) = (448, 224)
                        shortest_edge = int(min((max_len / aspect_ratio), min_len))
                        image = image_processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={'shortest_edge': shortest_edge})['pixel_values'][0]
                        new_images.append(image.to(self.model.device, dtype=torch.float16))
                        # Patch count depends on the resized image dimensions.
                        cur_token_len = ((image.shape[1] // 14) * (image.shape[2] // 14))
                        replace_token = (DEFAULT_IMAGE_PATCH_TOKEN * cur_token_len)
                        if getattr(self.model.config, 'mm_use_im_start_end', False):
                            replace_token = ((DEFAULT_IM_START_TOKEN + replace_token) + DEFAULT_IM_END_TOKEN)
                        prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token, 1)
                    images = new_images
                else:
                    images = image_processor(images, return_tensors='pt')['pixel_values']
                    images = images.to(self.model.device, dtype=torch.float16)
                    replace_token = (DEFAULT_IMAGE_PATCH_TOKEN * 256)  # 256 patches for fixed 224x224 input
                    if getattr(self.model.config, 'mm_use_im_start_end', False):
                        replace_token = ((DEFAULT_IM_START_TOKEN + replace_token) + DEFAULT_IM_END_TOKEN)
                    prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
            else:
                images = None
            image_args = {'images': images}
        else:
            images = None
            image_args = {}

        l_prompt = len(prompt)
        temperature = float(params.get('temperature', 1.0))
        max_new_tokens = min(int(params.get('max_new_tokens', 256)), 1024)
        stop_str = params.get('stop', None)
        stop_idx = None
        if (stop_str is not None):
            # Single-token stop strings are matched on the token id; longer
            # ones fall back to the text-level rfind() below.
            stop_idx = tokenizer(stop_str).input_ids
            if (len(stop_idx) == 1):
                stop_idx = stop_idx[0]
            else:
                stop_idx = None

        input_ids = tokenizer(prompt).input_ids
        output_ids = list(input_ids)
        pred_ids = []

        # Truncate the prompt from the left to fit the context window
        # (8 tokens of slack).
        max_src_len = ((self.context_len - max_new_tokens) - 8)
        input_ids = input_ids[(- max_src_len):]

        past_key_values = None
        for i in range(max_new_tokens):
            if (i == 0):
                # Prefill: full prompt (and images) in one forward pass.
                out = model(torch.as_tensor([input_ids]).cuda(), use_cache=True, **image_args)
                logits = out.logits
                past_key_values = out.past_key_values
            else:
                # Decode: one token at a time with the KV cache.
                attention_mask = torch.ones(1, (past_key_values[0][0].shape[(- 2)] + 1), device='cuda')
                out = model(input_ids=torch.as_tensor([[token]], device='cuda'), use_cache=True, attention_mask=attention_mask, past_key_values=past_key_values)
                logits = out.logits
                past_key_values = out.past_key_values

            last_token_logits = logits[0][(- 1)]
            if (temperature < 0.0001):
                token = int(torch.argmax(last_token_logits))  # greedy at ~0 temperature
            else:
                probs = torch.softmax((last_token_logits / temperature), dim=(- 1))
                token = int(torch.multinomial(probs, num_samples=1))
            output_ids.append(token)
            pred_ids.append(token)

            if ((stop_idx is not None) and (token == stop_idx)):
                stopped = True
            elif (token == tokenizer.eos_token_id):
                stopped = True
            else:
                stopped = False

            # Emit a chunk every stream_interval steps, at the end, or on stop.
            if (((i % args.stream_interval) == 0) or (i == (max_new_tokens - 1)) or stopped):
                cur_out = tokenizer.decode(pred_ids, skip_special_tokens=True)
                # NOTE(review): if params carries no 'stop', stop_str is None and
                # rfind(None) raises TypeError -- confirm callers always set it.
                pos = cur_out.rfind(stop_str)
                if (pos != (- 1)):
                    cur_out = cur_out[:pos]
                    stopped = True
                output = (ori_prompt + cur_out)
                ret = {'text': output, 'error_code': 0}
                (yield (json.dumps(ret).encode() + b'\x00'))
            if stopped:
                break

        if (past_key_values is not None):
            del past_key_values  # free KV-cache memory promptly

    def generate_stream_gate(self, params):
        """Wrap generate_stream, converting known failures into error payloads."""
        try:
            for x in self.generate_stream(params):
                (yield x)
        except ValueError as e:
            print('Caught ValueError:', e)
            ret = {'text': server_error_msg, 'error_code': 1}
            (yield (json.dumps(ret).encode() + b'\x00'))
        except torch.cuda.CudaError as e:
            print('Caught torch.cuda.CudaError:', e)
            ret = {'text': server_error_msg, 'error_code': 1}
            (yield (json.dumps(ret).encode() + b'\x00'))
def release_model_semaphore(fn=None):
    """Release the global concurrency semaphore, then invoke ``fn`` if given."""
    model_semaphore.release()
    if (fn is not None):
        fn()
def main():
    """CLI test client: resolve a worker for the model and stream one reply.

    Either uses ``args.worker_address`` directly, or asks the controller at
    ``args.controller_address`` for the worker serving ``args.model_name``.
    """
    if args.worker_address:
        worker_addr = args.worker_address
    else:
        controller_addr = args.controller_address
        ret = requests.post((controller_addr + '/refresh_all_workers'))
        ret = requests.post((controller_addr + '/list_models'))
        models = ret.json()['models']
        models.sort()
        print(f'Models: {models}')

        ret = requests.post((controller_addr + '/get_worker_address'), json={'model': args.model_name})
        worker_addr = ret.json()['address']
        print(f'worker_addr: {worker_addr}')

    if (worker_addr == ''):
        return  # no worker serves this model

    conv = default_conversation.copy()
    conv.append_message(conv.roles[0], args.message)
    prompt = conv.get_prompt()

    headers = {'User-Agent': 'LLaVA Client'}
    pload = {'model': args.model_name, 'prompt': prompt, 'max_new_tokens': args.max_new_tokens, 'temperature': 0.7, 'stop': conv.sep}
    response = requests.post((worker_addr + '/worker_generate_stream'), headers=headers, json=pload, stream=True)

    print(prompt.replace(conv.sep, '\n'), end='')
    # Each streamed chunk contains the full text so far; '\r' overwrites in place.
    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b'\x00'):
        if chunk:
            data = json.loads(chunk.decode('utf-8'))
            output = data['text'].split(conv.sep)[(- 1)]
            print(output, end='\r')
    print('')
def forward(self, hidden_states: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[(torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]])]:
    """Flash-attention replacement for LlamaAttention.forward.

    Input shape: Batch x Time x Channel

    attention_mask: [bsz, q_len] key-padding mask (NOT the usual additive
    causal mask -- see the patched _prepare_decoder_attention_mask).
    Training-only: output_attentions / use_cache / past_key_value are rejected.
    """
    (bsz, q_len, _) = hidden_states.size()

    # Project to per-head q/k/v: [bsz, num_heads, q_len, head_dim].
    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

    kv_seq_len = key_states.shape[(- 2)]
    offset = 0
    if (past_key_value is not None):
        offset = past_key_value[0].shape[(- 2)]
        kv_seq_len += offset
    (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, offset=offset)

    # The flash kernel path below supports none of these features.
    assert (not output_attentions), 'output_attentions is not supported'
    assert (not use_cache), 'use_cache is not supported'
    assert (past_key_value is None), 'past_key_value is not supported'

    # Pack q,k,v for the qkvpacked kernel: -> [bsz, q_len, 3, num_heads, head_dim].
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)

    key_padding_mask = attention_mask
    if (key_padding_mask is None):
        # No padding: flatten batch*seq and use uniform cumulative seq lengths.
        qkv = rearrange(qkv, 'b s ... -> (b s) ...')
        max_s = q_len
        cu_q_lens = torch.arange(0, ((bsz + 1) * q_len), step=q_len, dtype=torch.int32, device=qkv.device)
        output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(output, '(b s) ... -> b s ...', b=bsz)
    else:
        # Padded batch: drop padded tokens, run the kernel, then re-pad.
        nheads = qkv.shape[(- 2)]
        x = rearrange(qkv, 'b s three h d -> b s (three h d)')
        (x_unpad, indices, cu_q_lens, max_s) = unpad_input(x, key_padding_mask)
        x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
        output_unpad = flash_attn_unpadded_qkvpacked_func(x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices, bsz, q_len), 'b s (h d) -> b s h d', h=nheads)

    # Match LlamaAttention's return signature: (attn_output, attn_weights, past_kv).
    return (self.o_proj(rearrange(output, 'b s h d -> b s (h d)')), None, None)
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
    """Monkey-patch for LlamaModel._prepare_decoder_attention_mask.

    Returns the [bsz, seq_len] padding mask untouched: the flash-attention
    forward above applies causal masking internally (causal=True), so the
    additive causal mask must not be constructed here.
    """
    return attention_mask
def replace_llama_attn_with_flash_attn():
    """Globally patch transformers' LLaMA to use the flash-attention forward.

    Must be called before any LlamaModel is instantiated; both patches are
    required together (the mask patch feeds the padding mask the flash
    forward expects).
    """
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
def unwrap_model(model: nn.Module) -> nn.Module:
    """
    Unwrap a model from potential containers (as used in distributed training).

    Repeatedly follows the ``.module`` attribute (DataParallel/DDP wrappers)
    until reaching an object without one.

    Args:
        model (`torch.nn.Module`): The model to unwrap.
    """
    while hasattr(model, 'module'):
        model = model.module
    return model
class LLaVATrainer(Trainer):
    """HF Trainer variant: when only the multimodal projector is being tuned
    (args.tune_mm_mlp_adapter), also save just the projector/embedding weights
    as a small standalone .bin alongside the normal checkpoint."""

    def _save(self, output_dir: Optional[str]=None, state_dict=None):
        if getattr(self.args, 'tune_mm_mlp_adapter', False):
            # Extract only the weights that were actually trained.
            _state_dict = state_dict
            if (_state_dict is None):
                model_to_save = unwrap_model(self.model)
                _state_dict = model_to_save.state_dict()

            weight_to_save = {}
            keys_to_match = ['mm_projector', 'embed_tokens', 'embed_in']
            for (k, v) in _state_dict.items():
                if any(((key_match in k) for key_match in keys_to_match)):
                    weight_to_save[k] = v

            current_folder = output_dir.split('/')[(- 1)]
            parent_folder = os.path.dirname(output_dir)
            if current_folder.startswith('checkpoint-'):
                # Collect per-checkpoint projector weights in one sibling folder.
                mm_projector_folder = os.path.join(parent_folder, 'mm_projector')
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))

        # Always fall through to the standard HF save.
        super(LLaVATrainer, self)._save(output_dir, state_dict)
@dataclass
class ModelArguments():
    """Arguments controlling which model to load and how to wire the vision side."""
    model_name_or_path: Optional[str] = field(default='facebook/opt-125m')
    # Conversation/template version ('v0', 'v1', ...).
    version: Optional[str] = field(default='v0')
    # Freeze the language-model backbone during training.
    freeze_backbone: bool = field(default=False)
    # Train only the multimodal MLP projector (see LLaVATrainer._save).
    tune_mm_mlp_adapter: bool = field(default=False)
    # HF name/path of the CLIP vision tower; None for text-only training.
    vision_tower: Optional[str] = field(default=None)
    # Which vision-tower layer to take features from (-1 = last).
    mm_vision_select_layer: Optional[int] = field(default=(- 1))
    # Optional path to pretrained projector weights to load.
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    # Wrap image patch tokens with explicit start/end tokens.
    mm_use_im_start_end: bool = field(default=False)
    with_spi: bool = field(default=True)
@dataclass
class DataArguments():
    """Arguments describing the training data and its multimodal layout."""
    # Annotation widened to Optional[str]: the default is None.
    data_path: Optional[str] = field(default=None, metadata={'help': 'Path to the training data.'})
    # Tokenize lazily per sample instead of preprocessing the whole dataset upfront.
    lazy_preprocess: bool = False
    is_multimodal: bool = False
    # Put the image before the conversation text when building prompts.
    sep_image_conv_front: bool = False
    # Number of tokens one image expands to (0 = unset).
    image_token_len: int = 0
    image_folder: Optional[str] = field(default=None)
    # 'square' (resize) vs. aspect-preserving preprocessing.
    image_aspect_ratio: str = 'square'
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments plus LLaVA-specific switches."""
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default='adamw_torch')
    # Required False: the collator consumes columns HF would otherwise drop.
    remove_unused_columns: bool = field(default=False)
    # Keep the multimodal projector frozen during this run.
    freeze_mm_mlp_adapter: bool = field(default=False)
    force_fsdp: bool = field(default=False)
    model_max_length: int = field(default=512, metadata={'help': 'Maximum sequence length. Sequences will be right padded (and possibly truncated).'})
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collects the state dict and dump to disk."""
    state_dict = trainer.model.state_dict()
    if trainer.args.should_save:
        # Move tensors to CPU first so the save path does not hold GPU memory,
        # then drop the GPU copy before serialising.
        cpu_state_dict = {key: value.cpu() for (key, value) in state_dict.items()}
        del state_dict
        trainer._save(output_dir, state_dict=cpu_state_dict)
def smart_tokenizer_and_embedding_resize(special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if (num_new_tokens > 0):
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        # Initialise each new token's embedding to the mean of the existing
        # ones (both input and output sides) rather than random values.
        input_embeddings_avg = input_embeddings[:(- num_new_tokens)].mean(dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:(- num_new_tokens)].mean(dim=0, keepdim=True)

        input_embeddings[(- num_new_tokens):] = input_embeddings_avg
        output_embeddings[(- num_new_tokens):] = output_embeddings_avg
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize a list of strings.

    Returns input_ids/labels (identical tensors here) and their lengths
    excluding padding; labels are masked later by _mask_targets.
    """
    tokenized_list = [tokenizer(text, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True) for text in strings]
    input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
    # Length = count of non-pad tokens.
    input_ids_lens = labels_lens = [tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list]
    return dict(input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens)
def _mask_targets(target, tokenized_lens, speakers):
    """In-place: mask the header and all human turns in ``target`` with IGNORE_INDEX.

    tokenized_lens[0] is the header length; the remaining lengths pair up with
    ``speakers`` per conversation turn.
    """
    header_len = tokenized_lens[0]
    target[:header_len] = IGNORE_INDEX
    cur_idx = header_len
    for turn_len, speaker in zip(tokenized_lens[1:], speakers):
        if speaker == 'human':
            # The first two tokens of the turn are left unmasked -- presumably
            # BOS/speaker-signal tokens; mirrors the original offset.
            target[cur_idx + 2:cur_idx + turn_len] = IGNORE_INDEX
        cur_idx += turn_len