| | """Module containing the CustomGemma2PromptTokenizingStrategy class""" |
| |
|
| | |
| | import copy |
| | import logging |
| | from collections import defaultdict |
| | from typing import Generator, List, Tuple |
| |
|
| | |
| | from axolotl.prompt_tokenizers import ( |
| | PromptTokenizingStrategy, |
| | parse_tokenized_to_result, |
| | tokenize_prompt_default, |
| | ) |
| |
|
| | |
| | LOG = logging.getLogger("axolotl") |
| |
|
| | |
| | IGNORE_TOKEN_ID = -100 |
| |
|
| |
|
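
# The strategy below consumes ShareGPT-style samples. Illustrative shape only;
# the field values here are made up, but the keys ("conversations", "from",
# "value", and "name" for the "-chat" variants) are the ones the code reads:
#
#   {
#       "conversations": [
#           {"from": "system", "value": "..."},
#           {"from": "human", "value": "..."},
#           {"from": "human-chat", "name": "Alice", "value": "..."},
#           {"from": "gpt", "value": "..."},
#           {"from": "gpt-chat", "name": "Bob", "value": "..."},
#       ]
#   }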

class CustomGemma2PromptTokenizingStrategy(PromptTokenizingStrategy):
    """
    Tokenizing strategy for CustomGemma2.
    """

    def __init__(self, prompter, tokenizer, *args, **kwargs):
        super().__init__(prompter, tokenizer, *args, **kwargs)

    def tokenize_prompt(self, prompt):
        result, current_len = tokenize_prompt_default()

        # ShareGPT-style samples store the turns under either key.
        if "conversations" in prompt:
            conversation_name = "conversations"
        elif "conversation" in prompt:
            conversation_name = "conversation"
        else:
            LOG.warning("sample does not contain 'conversations' or 'conversation'")
            raise ValueError(
                "Sample does not contain a 'conversations' or 'conversation' key."
            )

        num_turns = len(prompt[conversation_name])
        for i, turn in enumerate(prompt[conversation_name]):
            # Only the first turn keeps the BOS token and skips the leading newline.
            if i == 0:
                strip_bos = False
                add_new_line = ""
            else:
                strip_bos = True
                add_new_line = "\n"

            # Append the EOS token only after the final turn.
            end_of_text = i == num_turns - 1

            # Map ShareGPT roles onto Gemma-2 chat roles; the "-chat" variants
            # prepend the speaker's name to the message.
            sharegpt_from, sharegpt_value = turn["from"].strip(), turn["value"].strip()
            if sharegpt_from == "system":
                role_name = "system"
            elif sharegpt_from == "human":
                role_name = "user"
            elif sharegpt_from == "human-chat":
                role_name = "user"
                sharegpt_value = f"{turn['name'].strip()}: {sharegpt_value}"
            elif sharegpt_from == "gpt":
                role_name = "model"
            elif sharegpt_from == "gpt-chat":
                role_name = "model"
                sharegpt_value = f"{turn['name'].strip()}: {sharegpt_value}"
            else:
                LOG.warning(f"'from' contains an unhandled string: {sharegpt_from}")
                raise ValueError(f"'from' contains an unhandled string: {sharegpt_from}")

            # Tokenize the turn header alone so its length can be used to mask
            # it out of the labels below.
            prefix = self._tokenize(
                f"{add_new_line}<start_of_turn>{role_name}\n",
                add_eos_token=False,
                strip_bos_token=strip_bos,
            )

            # Tokenize the full turn: header, content, and end-of-turn marker.
            res = self._tokenize(
                f"{add_new_line}<start_of_turn>{role_name}\n"
                f"{sharegpt_value.strip()}<end_of_turn>",
                add_eos_token=end_of_text,
                strip_bos_token=strip_bos,
            )

            # Build the label sequence. With train_on_inputs disabled, user and
            # system turns are fully masked, while model turns mask only the
            # turn header so the loss is computed on the model's reply.
            if self.train_on_inputs is False and sharegpt_from in (
                "system",
                "human",
                "human-chat",
            ):
                labels = [IGNORE_TOKEN_ID] * len(res["input_ids"])
            elif self.train_on_inputs is False and sharegpt_from in (
                "gpt",
                "gpt-chat",
            ):
                labels = (
                    [IGNORE_TOKEN_ID] * len(prefix["input_ids"])
                    + copy.deepcopy(res["input_ids"])[len(prefix["input_ids"]):]
                )
            else:
                labels = res["input_ids"]

            # Append this turn's tokens and labels to the running result.
            result, current_len = parse_tokenized_to_result(
                result,
                current_len,
                res,
                labels,
                pad_token_id=self.tokenizer.pad_token_id,
            )

        return result


class CustomGemma2Prompter:
    """
    Prompter for CustomGemma2.
    """

    def __init__(self, *args, **kwargs):
        pass


def load(tokenizer, cfg):
    return CustomGemma2PromptTokenizingStrategy(
        CustomGemma2Prompter(),
        tokenizer,
        cfg.train_on_inputs,
        cfg.sequence_len,
    )
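
# Minimal, illustrative smoke test -- a sketch, not part of the plugin API.
# It assumes `transformers` is installed and that a Gemma-2 tokenizer can be
# loaded; the "google/gemma-2-9b-it" checkpoint name is an assumption.
if __name__ == "__main__":
    from types import SimpleNamespace

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
    cfg = SimpleNamespace(train_on_inputs=False, sequence_len=4096)
    strategy = load(tokenizer, cfg)

    sample = {
        "conversations": [
            {"from": "system", "value": "You are a helpful assistant."},
            {"from": "human", "value": "Hello!"},
            {"from": "gpt", "value": "Hi! How can I help?"},
        ]
    }
    tokenized = strategy.tokenize_prompt(sample)
    print({key: len(value) for key, value in tokenized.items()})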