KrushiJethe and Recag committed
Commit ba1271a · 0 Parent(s)

Duplicate from Recag/BharatAI


Co-authored-by: Recag <Recag@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c010f9dd4bbab873006617e252f884c3241d3dabfd50c7ebd6ce63ea2c25949d
+ size 253302
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|endoftext|>": 1001,
+   "<|startoftext|>": 1000
+ }
config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "architectures": [
+     "BharatAI"
+   ],
+   "auto_map": {
+     "AutoConfig": "config.BharatAIConfig",
+     "AutoModelForCausalLM": "model.BharatAI"
+   },
+   "dropout": 0.2,
+   "model_type": "BharatAI",
+   "n_embd": 384,
+   "n_head": 4,
+   "n_layer": 4,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.0.dev0",
+   "vocab_size": 1000
+ }
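
For context: the "auto_map" above wires the Transformers Auto classes to the custom code shipped in this repo (config.py and model.py). A minimal loading sketch, assuming the repo id Recag/BharatAI and a transformers version with remote-code support; this is standard library usage, not part of the commit:

from transformers import AutoConfig, AutoModelForCausalLM

# auto_map routes AutoConfig -> config.BharatAIConfig and
# AutoModelForCausalLM -> model.BharatAI; trust_remote_code opts in to
# executing the repo's Python files
config = AutoConfig.from_pretrained("Recag/BharatAI", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Recag/BharatAI", trust_remote_code=True)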
config.py ADDED
@@ -0,0 +1,21 @@
+ 
+ from transformers import PretrainedConfig
+ class BharatAIConfig(PretrainedConfig):
+     model_type = "BharatAI"  # must match the "model_type" key in config.json
+ 
+     def __init__(
+         self,
+         vocab_size: int = 1000,
+         n_embd: int = 384,
+         n_head: int = 4,
+         n_layer: int = 4,
+         dropout: float = 0.2,
+         **kwargs
+     ):
+         self.vocab_size = vocab_size
+         self.n_embd = n_embd
+         self.n_head = n_head
+         self.n_layer = n_layer
+         self.dropout = dropout
+         super().__init__(**kwargs)
+ 
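
As a quick sanity check, the defaults above reproduce exactly the values serialized in config.json. A sketch (assumes config.py is importable from the working directory):

from config import BharatAIConfig

cfg = BharatAIConfig()  # vocab_size=1000, n_embd=384, n_head=4, n_layer=4, dropout=0.2
assert cfg.model_type == "BharatAI"
print(cfg.to_json_string())  # serialization is inherited from PretrainedConfig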
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.36.0.dev0"
+ }
model.py ADDED
@@ -0,0 +1,168 @@
+ from transformers import PreTrainedModel
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ from .config import BharatAIConfig
+ batch_size = 4
+ block_size = 128
+ max_iters = 10
+ learning_rate = 3e-4
+ eval_iters = 100
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ print(device)
+ # n_embd = 384
+ # n_head = 4
+ # n_layer = 4
+ dropout = 0.2
+ # vocab_size = 1000
+ # model architecture
+ class Head(nn.Module):
+     """ one head of self-attention """
+ 
+     def __init__(self, n_embd, head_size):
+         super().__init__()
+         self.key = nn.Linear(n_embd, head_size, bias=False)
+         self.query = nn.Linear(n_embd, head_size, bias=False)
+         self.value = nn.Linear(n_embd, head_size, bias=False)
+         self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
+ 
+         self.dropout = nn.Dropout(dropout)
+ 
+     def forward(self, x):
+         # input of size (batch, time-step, channels)
+         # output of size (batch, time-step, head size)
+         B, T, C = x.shape
+         k = self.key(x)    # (B,T,hs)
+         q = self.query(x)  # (B,T,hs)
+         # compute attention scores ("affinities")
+         wei = q @ k.transpose(-2, -1) * k.shape[-1]**-0.5  # (B,T,hs) @ (B,hs,T) -> (B,T,T)
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))  # (B,T,T)
+         wei = F.softmax(wei, dim=-1)  # (B,T,T)
+         wei = self.dropout(wei)
+         # perform the weighted aggregation of the values
+         v = self.value(x)  # (B,T,hs)
+         out = wei @ v      # (B,T,T) @ (B,T,hs) -> (B,T,hs)
+         return out
+ 
+ 
+ class MultiHeadAttention(nn.Module):
+     """ multiple heads of self-attention in parallel """
+ 
+     def __init__(self, num_heads, head_size, n_embd):
+         super().__init__()
+         self.heads = nn.ModuleList([Head(n_embd, head_size) for _ in range(num_heads)])
+         self.proj = nn.Linear(head_size * num_heads, n_embd)
+         self.dropout = nn.Dropout(dropout)
+ 
+     def forward(self, x):
+         out = torch.cat([h(x) for h in self.heads], dim=-1)  # (B,T,F) -> (B,T,num_heads*head_size)
+         out = self.dropout(self.proj(out))
+         return out
+ 
+ class FeedForward(nn.Module):
+     """ a simple linear layer followed by a non-linearity """
+ 
+     def __init__(self, n_embd):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(n_embd, 4 * n_embd),
+             nn.ReLU(),
+             nn.Linear(4 * n_embd, n_embd),
+             nn.Dropout(dropout),
+         )
+ 
+     def forward(self, x):
+         return self.net(x)
+ 
+ class Block(nn.Module):
+     """ Transformer block: communication followed by computation """
+ 
+     def __init__(self, n_embd, n_head):
+         # n_embd: embedding dimension, n_head: the number of heads we'd like
+         super().__init__()
+         head_size = n_embd // n_head
+         self.sa = MultiHeadAttention(n_head, head_size, n_embd)
+         self.ffwd = FeedForward(n_embd)
+         self.ln1 = nn.LayerNorm(n_embd)
+         self.ln2 = nn.LayerNorm(n_embd)
+ 
+     def forward(self, x):
+         # post-norm residual connections
+         y = self.sa(x)
+         x = self.ln1(x + y)
+         y = self.ffwd(x)
+         x = self.ln2(x + y)
+         return x
+ 
+ class BharatAI(PreTrainedModel):
+     config_class = BharatAIConfig
+ 
+     def __init__(self, config):
+         super().__init__(config)
+         self.token_embedding_table = nn.Embedding(config.vocab_size, config.n_embd)
+         self.position_embedding_table = nn.Embedding(block_size, config.n_embd)
+         self.blocks = nn.Sequential(*[Block(config.n_embd, n_head=config.n_head) for _ in range(config.n_layer)])
+         self.ln_f = nn.LayerNorm(config.n_embd)  # final layer norm
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+ 
+         self.apply(self._init_weights)
+ 
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+ 
+     def forward(self, index, targets=None):
+         B, T = index.shape
+ 
+         # index and targets are both (B,T) tensors of integers
+         tok_emb = self.token_embedding_table(index)  # (B,T,C)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=index.device))  # (T,C)
+         x = tok_emb + pos_emb     # (B,T,C)
+         x = self.blocks(x)        # (B,T,C)
+         x = self.ln_f(x)          # (B,T,C)
+         logits = self.lm_head(x)  # (B,T,vocab_size)
+ 
+         if targets is None:
+             loss = None
+         else:
+             B, T, C = logits.shape
+             logits = logits.view(B*T, C)
+             targets = targets.view(B*T)
+             loss = F.cross_entropy(logits, targets)
+ 
+         return logits, loss
+ 
+     def generate(self, index, max_new_tokens):
+         # index is a (B,T) array of indices in the current context
+         for _ in range(max_new_tokens):
+             # crop the context to the last block_size tokens
+             index_cond = index[:, -block_size:]
+             # get the predictions
+             logits, _ = self.forward(index_cond)
+             # focus only on the last time step
+             logits = logits[:, -1, :]  # becomes (B,C)
+             # apply softmax to get probabilities
+             probs = F.softmax(logits, dim=-1)  # (B,C)
+             # sample from the distribution
+             index_next = torch.multinomial(probs, num_samples=1)  # (B,1)
+             # append the sampled index to the running sequence
+             index = torch.cat((index, index_next), dim=1)  # (B,T+1)
+         return index
+ 
+ # config = BharatAIConfig(
+ #     vocab_size=1000,
+ #     n_embd=384,
+ #     n_head=4,
+ #     n_layer=4,
+ #     dropout=0.2,
+ #     # any other parameters you want to set
+ # )
+ # model = BharatAI(config)
+ # # model.load_state_dict(torch.load('/content/BharatAI.pth'))
+ 
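
A minimal end-to-end sketch of driving the model above (hypothetical local usage; assumes the relative import in model.py is adjusted, or the files are loaded as a package, so that model and config are importable):

import torch
from config import BharatAIConfig
from model import BharatAI

cfg = BharatAIConfig()
model = BharatAI(cfg).eval()

# a single prompt of token ids; every id must be < cfg.vocab_size
context = torch.zeros((1, 1), dtype=torch.long)
with torch.no_grad():
    out = model.generate(context, max_new_tokens=20)  # (1, 21) tensor of token ids
print(out.tolist())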
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bb6468b57c3eeb42251be57a63508cdb291a4b281e81871dcf2c9ec714e418c
+ size 32707840
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.py ADDED
@@ -0,0 +1,246 @@
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+ 
+ import sentencepiece as spm
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+ 
+ logger = logging.get_logger(__name__)
+ 
+ VOCAB_FILES_NAMES = {"vocab_file": "Tokenizer.model"}
+ 
+ PRETRAINED_VOCAB_FILES_MAP = {
+     "vocab_file": {},
+     "tokenizer_file": {},
+ }
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+ 
+ 
+ class BharatAITokenizer(PreTrainedTokenizer):
+     """
+     Construct a BharatAI tokenizer, backed by a SentencePiece model.
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+ 
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask"]
+ 
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<|startoftext|>",
+         eos_token="<|endoftext|>",
+         pad_token="<unk>",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         # wrap plain-string special tokens into AddedToken objects
+         bos_token = (
+             AddedToken(bos_token, lstrip=False, rstrip=False)
+             if isinstance(bos_token, str)
+             else bos_token
+         )
+         eos_token = (
+             AddedToken(eos_token, lstrip=False, rstrip=False)
+             if isinstance(eos_token, str)
+             else eos_token
+         )
+         unk_token = (
+             AddedToken(unk_token, lstrip=False, rstrip=False)
+             if isinstance(unk_token, str)
+             else unk_token
+         )
+         pad_token = (
+             AddedToken(pad_token, lstrip=False, rstrip=False)
+             if isinstance(pad_token, str)
+             else pad_token
+         )
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+ 
+     def __getstate__(self):
+         # the SentencePiece processor is not picklable; drop it and reload on unpickle
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+ 
+     def __setstate__(self, d):
+         self.__dict__ = d
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+ 
+     @property
+     def vocab_size(self):
+         """Returns the vocab size."""
+         return self.sp_model.get_piece_size()
+ 
+     def get_vocab(self):
+         """Returns the vocab as a dict."""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+ 
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+ 
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+ 
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+ 
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) into a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using the sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+ 
+     def save_vocabulary(
+         self, save_directory, filename_prefix: Optional[str] = None
+     ) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+         Returns:
+             `Tuple[str]`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory,
+             (filename_prefix + "-" if filename_prefix else "")
+             + VOCAB_FILES_NAMES["vocab_file"],
+         )
+ 
+         if os.path.abspath(self.vocab_file) != os.path.abspath(
+             out_vocab_file
+         ) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             # fall back to serializing the in-memory SentencePiece model
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+ 
+         return (out_vocab_file,)
+ 
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+ 
+         output = bos_token_id + token_ids_0 + eos_token_id
+ 
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+ 
+         return output
+ 
+     def get_special_tokens_mask(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None,
+         already_has_special_tokens: bool = False,
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer's `prepare_for_model` method.
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0,
+                 token_ids_1=token_ids_1,
+                 already_has_special_tokens=True,
+             )
+ 
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+ 
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+ 
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed, to be used in a sequence-pair classification task. The
+         sequence pair mask has the following format:
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence    | second sequence |
+         ```
+         If `token_ids_1` is `None`, only the first portion of the mask (0s) is returned.
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+ 
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+ 
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+ 
+         return output
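
A sketch of exercising the tokenizer directly against the LFS-tracked Tokenizer.model (assuming the file has been pulled locally and tokenizer.py is importable):

from tokenizer import BharatAITokenizer

tok = BharatAITokenizer(vocab_file="Tokenizer.model")
ids = tok("some text")["input_ids"]   # BOS is prepended since add_bos_token=True
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids))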
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1000": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1001": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenizer.BharatAITokenizer",
+       null
+     ]
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<unk>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "BharatAITokenizer",
+   "unk_token": "<unk>"
+ }
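
Like config.json, this file's "auto_map" points AutoTokenizer at tokenizer.BharatAITokenizer, so the hub-side loading sketch mirrors the model one (repo id assumed; standard transformers usage, not part of the commit):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Recag/BharatAI", trust_remote_code=True)
print(tok.bos_token, tok.eos_token)  # <|startoftext|> <|endoftext|>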