jcblaise commited on
Commit
790818e
·
verified ·
1 Parent(s): b7c68d5

Upload folder using huggingface_hub

Browse files
config.json ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "BackpackGPT2LMHeadModel"
5
+ ],
6
+ "attn_pdrop": 0.1,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_backpack_gpt2.BackpackGPT2Config",
9
+ "AutoModelForCausalLM": "modeling_backpack_gpt2.BackpackGPT2LMHeadModel"
10
+ },
11
+ "bos_token_id": 50256,
12
+ "dtype": "float32",
13
+ "embd_pdrop": 0.1,
14
+ "eos_token_id": 50256,
15
+ "initializer_range": 0.02,
16
+ "layer_norm_epsilon": 1e-05,
17
+ "model_type": "gpt2",
18
+ "n_embd": 768,
19
+ "n_head": 12,
20
+ "n_inner": null,
21
+ "n_layer": 12,
22
+ "n_positions": 512,
23
+ "num_senses": 16,
24
+ "pad_token_id": 50256,
25
+ "reorder_and_upcast_attn": false,
26
+ "resid_pdrop": 0.1,
27
+ "scale_attn_by_inverse_layer_idx": true,
28
+ "scale_attn_weights": true,
29
+ "sense_intermediate_scale": 4,
30
+ "summary_activation": null,
31
+ "summary_first_dropout": 0.1,
32
+ "summary_proj_to_labels": true,
33
+ "summary_type": "cls_index",
34
+ "summary_use_proj": true,
35
+ "transformers_version": "4.57.0",
36
+ "use_cache": true,
37
+ "vocab_size": 50264
38
+ }
configuration_backpack_gpt2.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers.models.gpt2.configuration_gpt2 import GPT2Config
2
+
3
class BackpackGPT2Config(GPT2Config):
    """
    This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
    instantiate a Backpack GPT-2 model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`GPT2Config`] and can be used to control the model outputs. Read the
    documentation from [`GPT2Config`] for more information.

    Args:
        num_senses (`int`, *optional*, defaults to 16):
            The number of sense vectors to define for each word.
        sense_intermediate_scale (`int`, *optional*, defaults to 4):
            The hidden dimensionality of the sense vector network.

    Example:

    ```python
    >>> from transformers import BackpackGPT2Config, BackpackGPT2Model

    >>> # Initializing a GPT2 configuration
    >>> configuration = BackpackGPT2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = BackpackGPT2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    def __init__(
        self,
        vocab_size=50264,
        num_senses=16,
        sense_intermediate_scale=4,
        n_positions=512,
        scale_attn_by_inverse_layer_idx=True,
        **kwargs,
    ):
        # Backpack-specific hyperparameters; everything else is standard GPT-2
        # configuration handled by the parent class.
        self.num_senses = num_senses
        self.sense_intermediate_scale = sense_intermediate_scale
        super().__init__(
            vocab_size=vocab_size,
            n_positions=n_positions,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            **kwargs,
        )
generation_config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 50256,
4
+ "eos_token_id": [
5
+ 50256
6
+ ],
7
+ "transformers_version": "4.57.0"
8
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
modeling_backpack_gpt2.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import Optional, Tuple
4
+
5
+ import torch
6
+ import torch.utils.checkpoint
7
+ from torch import nn
8
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
9
+
10
+ from transformers.activations import ACT2FN
11
+ from transformers.pytorch_utils import Conv1D
12
+ from transformers.utils import (
13
+ ModelOutput,
14
+ logging,
15
+ )
16
+ from transformers.models.gpt2.modeling_gpt2 import GPT2Model, GPT2PreTrainedModel, GenerationMixin
17
+ from transformers.cache_utils import Cache
18
+ from .configuration_backpack_gpt2 import BackpackGPT2Config
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ ### Backpack-Specific
24
### Backpack-Specific
class BackpackGPT2PreTrainedModel(GPT2PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Attention buffers that older checkpoints may not carry; they are rebuilt
    # at load time, so missing keys are not an error.
    _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias"]

    config_class = BackpackGPT2Config
    base_model_prefix = "backpack"
    is_parallelizable = True
    supports_gradient_checkpointing = False
    _no_split_modules = ["GPT2Block", "BackpackNoMixBlock"]

    # NOTE: the original defined an `__init__` that only forwarded to
    # `super().__init__`; that no-op override has been removed.
39
+
40
class BackpackMLP(nn.Module):
    """Two-layer feed-forward block: Conv1D -> activation -> Conv1D -> dropout."""

    def __init__(self, embed_dim, intermediate_dim, out_dim, config):
        super().__init__()
        # Attribute names (c_fc / c_proj) are part of the checkpoint layout.
        self.c_fc = Conv1D(intermediate_dim, embed_dim)
        self.c_proj = Conv1D(out_dim, intermediate_dim)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        """Apply the MLP position-wise to `hidden_states`."""
        expanded = self.act(self.c_fc(hidden_states))
        return self.dropout(self.c_proj(expanded))
55
+
56
class BackpackNoMixBlock(nn.Module):
    """Residual MLP block without attention: positions never mix here."""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.mlp = BackpackMLP(config.n_embd, config.n_embd * 4, config.n_embd, config)
        self.resid_dropout1 = nn.Dropout(config.resid_pdrop)
        self.resid_dropout2 = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states, residual):
        """Two residual updates (dropout -> add -> layernorm), MLP in between."""
        residual = residual + self.resid_dropout1(hidden_states)
        normed = self.ln_1(residual)
        residual = residual + self.resid_dropout2(self.mlp(normed))
        return self.ln_2(residual)
73
+
74
+
75
class BackpackSenseNetwork(nn.Module):
    """Maps word embeddings to `num_senses` sense vectors per token position.

    Args:
        config: a `BackpackGPT2Config`-style config (reads `n_embd`, dropout
            probabilities, `layer_norm_epsilon`, `sense_intermediate_scale`).
        num_senses: number of sense vectors produced for each token.
        device, dtype: accepted for interface compatibility; currently unused.
    """

    def __init__(self, config, num_senses, device=None, dtype=None):
        super().__init__()
        self.num_senses = num_senses
        self.n_embd = config.n_embd

        self.dropout = nn.Dropout(config.embd_pdrop)
        self.block = BackpackNoMixBlock(config)
        self.ln = nn.LayerNorm(self.n_embd, eps=config.layer_norm_epsilon)
        # BUGFIX(consistency): out_dim previously used `config.num_senses`
        # while `forward` reshapes by the `num_senses` argument. Use the
        # argument everywhere so the two can never disagree.
        self.final_mlp = BackpackMLP(
            embed_dim=config.n_embd,
            intermediate_dim=config.sense_intermediate_scale * config.n_embd,
            out_dim=config.n_embd * num_senses,
            config=config,
        )

    def forward(self, input_embeds):
        """input_embeds: (bs, s, d) -> sense vectors (bs, num_senses, s, d)."""
        residual = self.dropout(input_embeds)
        hidden_states = self.ln(residual)
        hidden_states = self.block(hidden_states, residual)
        senses = self.final_mlp(hidden_states)  # (bs, s, num_senses * d)
        bs, s, nvd = senses.shape
        return senses.reshape(bs, s, self.num_senses, self.n_embd).transpose(1, 2)  # (bs, nv, s, d)
99
+
100
class BackpackWeightNetwork(nn.Module):
    """Computes causal, per-sense contextualization weights from encoded states.

    A single linear layer produces per-sense query/key vectors; their scaled
    dot products are causally masked and softmaxed into (b, num_senses, s, s)
    weights over positions.
    """

    def __init__(self, num_senses, embed_dim):
        super().__init__()
        self.n_embd = embed_dim
        self.num_senses = num_senses
        self.embed_per_sense = embed_dim // num_senses
        # 2 * num_senses * (embed_dim // num_senses) outputs: queries and keys.
        self.c_attn = nn.Linear(embed_dim, 2 * num_senses * self.embed_per_sense)
        # Optional override for the 1/sqrt(head_dim) scale; None = default.
        self.softmax_scale = None

    def forward(self, encoded):
        """encoded: (b, s, d) -> causal attention weights (b, num_senses, s, s).

        The softmax is computed in fp32 for numerical stability and cast back
        to the input dtype. (The previous revision carried its old
        implementation as a dead docstring; that commented-out code is removed.)
        """
        b, s, d = encoded.shape
        x = self.c_attn(encoded)  # (b, s, 2*d)
        x = x.reshape(b, s, 2, self.num_senses, self.embed_per_sense)  # (b, s, 2, nv, d//nv)

        # q, k: (b, s, nv, d//nv)
        q, k = x.unbind(dim=2)

        scale = (self.softmax_scale
                 if self.softmax_scale is not None
                 else 1.0 / math.sqrt(q.shape[-1]))
        # einsum gives (b, nv, s, s)
        scores = torch.einsum('bthd,bshd->bhts', q, k) * scale  # keep native dtype here

        # Boolean causal mask: True = future position, masked out.
        causal_mask = torch.ones(s, s, device=scores.device, dtype=torch.bool).triu_(1)
        scores = scores.float()  # do the numerically sensitive bits in fp32
        scores = scores.masked_fill(causal_mask, float('-inf'))

        attn = torch.softmax(scores, dim=-1)  # fp32 softmax
        return attn.to(q.dtype)  # cast back (fp16/bf16) for downstream
150
+
151
@dataclass
class BackpackGPT2BaseModelOutput(ModelOutput):
    """Output of `BackpackGPT2Model`: final states plus Backpack internals."""

    # (bs, s, d): sum over senses of contextualization @ senses.
    hidden_states: torch.FloatTensor = None
    # (bs, num_senses, s, s): causal weights over positions, per sense.
    contextualization: torch.FloatTensor = None
    # (bs, num_senses, s, d): sense vectors for each input token.
    senses: torch.FloatTensor = None
156
+
157
class BackpackGPT2Model(BackpackGPT2PreTrainedModel):
    """Backpack GPT-2 base model.

    A standard GPT-2 predicts contextualization weights; per-token sense
    vectors are combined under those weights to form the hidden states.
    """

    _keys_to_ignore_on_load_missing = [r".*attn.masked_bias", r".*attn.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd

        self.num_senses = config.num_senses
        self.gpt2_model = GPT2Model(config)
        # BUGFIX: the third positional parameter of BackpackSenseNetwork is
        # `device`; the original passed `self.gpt2_model.wte` there. It was a
        # no-op (the parameter is unused) but misleading — dropped.
        self.sense_network = BackpackSenseNetwork(config, self.num_senses)
        self.word_embeddings = self.gpt2_model.wte
        self.position_embeddings = self.gpt2_model.wpe
        self.sense_weight_net = BackpackWeightNetwork(self.num_senses, self.embed_dim)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

    def get_num_senses(self):
        return self.num_senses

    def get_word_embeddings(self):
        return self.word_embeddings

    def get_sense_network(self):
        return self.sense_network

    def get_input_embeddings(self):
        return self.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.word_embeddings = new_embeddings

    def forward(
            self,
            input_ids,
            position_ids,
            cache_position: Optional[torch.LongTensor] = None,
            past_key_values: Optional[Cache] = None,
            inputs_embeds: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            use_cache: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **kwargs):
        """Run the Backpack forward pass.

        NOTE(review): only `input_ids`, `position_ids` and `**kwargs` are
        forwarded to the underlying GPT-2; the other named parameters are
        accepted for interface compatibility but are ignored here — confirm
        this is intended before relying on, e.g., `attention_mask`.
        """
        # Compute senses
        sense_input_embeds = self.word_embeddings(input_ids)
        senses = self.sense_network(sense_input_embeds)  # (bs, nv, s, d)

        # Compute contextualization weights from the GPT-2 hidden states
        contextl_hidden_states = self.gpt2_model(
            input_ids=input_ids, position_ids=position_ids, **kwargs
        ).last_hidden_state  # (bs, s, d)
        contextualization = self.sense_weight_net(contextl_hidden_states)  # (bs, nv, s, s)

        # Weighted sum of senses: (bs, nv, s, d) -> (bs, s, d)
        hidden_states = torch.sum(contextualization @ senses, dim=1)
        return BackpackGPT2BaseModelOutput(
            hidden_states=hidden_states,
            contextualization=contextualization,
            senses=senses,
        )

    def run_with_custom_contextualization(self, input_ids, contextualization):
        """Recombine senses under a caller-supplied contextualization matrix."""
        # Compute senses
        sense_input_embeds = self.word_embeddings(input_ids)
        senses = self.sense_network(sense_input_embeds)  # (bs, nv, s, d)

        # Weighted sum of senses: (bs, nv, s, d) -> (bs, s, d)
        hidden_states = torch.sum(contextualization @ senses, dim=1)
        return BackpackGPT2BaseModelOutput(
            hidden_states=hidden_states,
            contextualization=contextualization,
            senses=senses,
        )
231
+
232
@dataclass
class BackpackGPT2LMHeadModelOutput(ModelOutput):
    """Output of `BackpackGPT2LMHeadModel`."""

    # (bs, s, vocab_size): next-token logits from the tied LM head.
    logits: torch.FloatTensor = None
    # (bs, num_senses, s, s): contextualization weights from the backpack.
    contextualization: torch.FloatTensor = None
    # (bs, s, d): hidden states that fed the LM head.
    backpack_hidden_states: torch.FloatTensor = None
    loss: Optional[torch.Tensor] = None  # smoothed (for optimization)
    loss_unsmoothed: Optional[torch.Tensor] = None  # raw CE (for logging/PPL)
    # (bs, num_senses, s, d): sense vectors for each input token.
    senses: torch.FloatTensor = None
240
+
241
class BackpackGPT2LMHeadModel(BackpackGPT2PreTrainedModel, GenerationMixin):
    """Backpack GPT-2 with a weight-tied language-modeling head."""

    _keys_to_ignore_on_load_missing = [r".*attn.masked_bias", r".*attn.bias"]
    accepts_loss_kwargs = False

    def __init__(self, config):
        super().__init__(config)
        self.backpack = BackpackGPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        self.tie_weights()

    def tie_weights(self):
        # The LM head shares the word-embedding matrix (which is also the
        # embedding of the underlying GPT-2 contextualization network).
        self.lm_head.weight = self.backpack.word_embeddings.weight

    def get_lm_head(self):
        return self.lm_head

    def get_input_embeddings(self):
        return self.backpack.word_embeddings

    def forward(
            self,
            input_ids,
            position_ids=None,
            labels: Optional[torch.LongTensor] = None,
            label_smoothing: Optional[float] = 0,
            cache_position: Optional[torch.LongTensor] = None,
            past_key_values: Optional[Cache] = None,
            inputs_embeds: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.FloatTensor] = None,
            use_cache: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **kwargs):
        """Forward pass with optional label-smoothed LM loss.

        When `labels` is given, `loss` is the (possibly smoothed) training
        loss and `loss_unsmoothed` is the raw cross-entropy, computed without
        gradients, for logging/perplexity.
        """
        backpack_out = self.backpack(input_ids, position_ids=position_ids, **kwargs)
        hidden = backpack_out.hidden_states
        lm_logits = self.lm_head(hidden)  # (bs, s, V)

        smoothed_loss = None
        raw_loss = None

        if labels is not None:
            labels = labels.to(lm_logits.device)
            # Shift so that position t predicts token t+1.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            flat_logits = shift_logits.view(-1, shift_logits.size(-1))
            flat_labels = shift_labels.view(-1)

            smoothed_fct = CrossEntropyLoss(
                ignore_index=-100, reduction='mean', label_smoothing=label_smoothing
            )
            smoothed_loss = smoothed_fct(flat_logits, flat_labels)

            # Reporting loss: **unsmoothed** (no grad) in case we used label smoothing
            with torch.no_grad():
                raw_fct = CrossEntropyLoss(ignore_index=-100, reduction="mean")
                raw_loss = raw_fct(flat_logits.detach(), flat_labels)

        return BackpackGPT2LMHeadModelOutput(
            logits=lm_logits,
            contextualization=backpack_out.contextualization,
            backpack_hidden_states=hidden,
            loss=smoothed_loss,
            loss_unsmoothed=raw_loss,
            senses=backpack_out.senses,
        )

    def run_with_custom_contextualization(self, input_ids, contextualization):
        """LM logits under a caller-supplied contextualization matrix."""
        outputs = self.backpack.run_with_custom_contextualization(input_ids, contextualization)
        lm_logits = self.lm_head(outputs.hidden_states)
        return BackpackGPT2LMHeadModelOutput(
            logits=lm_logits,
            contextualization=outputs.contextualization,
            backpack_hidden_states=outputs.hidden_states,
        )
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43c0d2ca509710931a5d450e92b84d27914d58224425e4a7bf0316d298f13118
3
+ size 680390003
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<|endoftext|>",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "50256": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ }
12
+ },
13
+ "bos_token": "<|endoftext|>",
14
+ "clean_up_tokenization_spaces": false,
15
+ "eos_token": "<|endoftext|>",
16
+ "extra_special_tokens": {},
17
+ "model_max_length": 1024,
18
+ "pad_token": "<|endoftext|>",
19
+ "tokenizer_class": "GPT2Tokenizer",
20
+ "unk_token": "<|endoftext|>"
21
+ }
training_args.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_lm_loss": true,
3
+ "align_pct": 0.2,
4
+ "clip_grad_norm": 1.0,
5
+ "dataset_config": "eng-ind",
6
+ "dataset_name": "jcblaise/backpack-parallel",
7
+ "dataset_split": "train",
8
+ "eval_every_n_steps": 2000,
9
+ "freeze_sense_during_polish": true,
10
+ "label_smoothing": 0.05,
11
+ "learning_rate": 5e-05,
12
+ "log_every_n_steps": 100,
13
+ "max_checkpoints": 1,
14
+ "max_length": 256,
15
+ "max_steps": 150000,
16
+ "model_id": "/home/mila/b/blaisej/workspace/trained-models/backpack-gpt2-retuned-full",
17
+ "normalize_last_token_embeds": true,
18
+ "normalize_sense_pooling": true,
19
+ "num_workers": 0,
20
+ "overwrite_output_dir": false,
21
+ "polish_pct": 0.5,
22
+ "resume_from_checkpoint": true,
23
+ "save_dir": "/home/mila/b/blaisej/workspace/trained-models/backpack-adapted-eng-ind-v5",
24
+ "save_every": 2000,
25
+ "seed": 1234,
26
+ "sense_pool_temp": 0.7,
27
+ "src": "eng",
28
+ "tau_ctx_end": 0.07,
29
+ "tau_ctx_start": 0.07,
30
+ "tau_sns_end": 0.05,
31
+ "tau_sns_start": 0.05,
32
+ "test_batch_size": 64,
33
+ "tgt": "ind",
34
+ "train_batch_size": 64,
35
+ "use_fp16": true,
36
+ "use_wandb": true,
37
+ "w_ctx_align": 0.45,
38
+ "w_ctx_mid": 0.4,
39
+ "w_ctx_tail": 0.15,
40
+ "w_lm_align": 0.02,
41
+ "w_lm_mid": 0.2,
42
+ "w_lm_tail": 0.7,
43
+ "w_sns_align": 0.55,
44
+ "w_sns_mid": 0.4,
45
+ "w_sns_tail": 0.15,
46
+ "wandb_project": "jan_cruz_backpack",
47
+ "wandb_run_id": "",
48
+ "wandb_run_name": "adaptation-eng-ind-v5",
49
+ "warmup_ratio": 0.1
50
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff