Commit from model create scripts
- .gitattributes +2 -8
- config.gin +179 -0
- config.json +29 -0
- flax_model.msgpack +3 -0
- model-info.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- tokenizer.json +3 -0
- tokenizer_config.json +1 -0
- train/events.out.tfevents.1673617845.t1v-n-a0e4cb08-w-0.1682318.0.v2 +3 -0
- training_eval/translate_long/events.out.tfevents.1673617845.t1v-n-a0e4cb08-w-0.1682318.1.v2 +3 -0
.gitattributes
CHANGED
@@ -2,27 +2,20 @@
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
@@ -30,5 +23,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.
+*.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
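For reference, each attribute line above tells git to route matching files through the git-lfs filter rather than storing their contents in the repository itself. A minimal sketch of how the updated patterns apply to this commit's files, approximating gitattributes globbing with Python's fnmatch (a subset of the patterns above; gitattributes matching is gitignore-style, so this is only an approximation):

# Sketch: approximate .gitattributes LFS routing with fnmatch-style globs.
from fnmatch import fnmatch

lfs_patterns = ["*.bin", "*.msgpack", "*.model", "*tfevents*", "tokenizer.json"]
committed = ["pytorch_model.bin", "flax_model.msgpack", "spiece.model",
             "tokenizer.json", "config.json", "config.gin"]

for name in committed:
    via_lfs = any(fnmatch(name, pattern) for pattern in lfs_patterns)
    print(f"{name:22s} -> {'LFS pointer' if via_lfs else 'plain git'}")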
config.gin
ADDED
@@ -0,0 +1,179 @@
+from __gin__ import dynamic_registration
+import __main__ as train_script
+import seqio
+from t5.data import mixtures
+from t5x import adafactor
+from t5x.examples.t5 import network
+from t5x import gin_utils
+from t5x import models
+from t5x import partitioning
+from t5x import trainer
+from t5x import utils
+import tasks
+
+# Macros:
+# ==============================================================================
+BATCH_SIZE = 128
+DROPOUT_RATE = 0.1
+EVAL_PERIOD = 1000
+EVAL_STEPS = 20
+EVALUATOR_NUM_EXAMPLES = None
+EVALUATOR_USE_MEMORY_CACHE = True
+INITIAL_CHECKPOINT_PATH = \
+    'gs://nb-t5x-us-central2/norwegian_NCC_plus_English_pluss200k_balanced_bokmaal_nynorsk_t5x_small/checkpoint_1700000'
+JSON_WRITE_N_RESULTS = None
+LABEL_SMOOTHING = 0.0
+LOSS_NORMALIZING_FACTOR = None
+MIXTURE_OR_TASK_MODULE = None
+MIXTURE_OR_TASK_NAME = 'translate_long'
+MODEL = @models.EncoderDecoderModel()
+MODEL_DIR = 'gs://nb-t5x-us-central2/finetuned/nynorsk_balanced_small_long_v1'
+OPTIMIZER = @adafactor.Adafactor()
+RANDOM_SEED = 0
+TASK_FEATURE_LENGTHS = {'inputs': 512, 'targets': 512}
+TRAIN_STEPS = 1705000
+USE_CACHED_TASKS = False
+USE_HARDWARE_RNG = False
+VOCABULARY = @seqio.SentencePieceVocabulary()
+Z_LOSS = 0.0001
+
+# Parameters for adafactor.Adafactor:
+# ==============================================================================
+adafactor.Adafactor.decay_rate = 0.8
+adafactor.Adafactor.logical_factor_rules = \
+    @adafactor.standard_logical_factor_rules()
+adafactor.Adafactor.step_offset = 0
+
+# Parameters for utils.CheckpointConfig:
+# ==============================================================================
+utils.CheckpointConfig.restore = @utils.RestoreCheckpointConfig()
+utils.CheckpointConfig.save = @utils.SaveCheckpointConfig()
+
+# Parameters for utils.create_learning_rate_scheduler:
+# ==============================================================================
+utils.create_learning_rate_scheduler.base_learning_rate = 0.001
+utils.create_learning_rate_scheduler.factors = 'constant'
+utils.create_learning_rate_scheduler.warmup_steps = 1000
+
+# Parameters for infer_eval/utils.DatasetConfig:
+# ==============================================================================
+infer_eval/utils.DatasetConfig.batch_size = %BATCH_SIZE
+infer_eval/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
+infer_eval/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
+infer_eval/utils.DatasetConfig.pack = False
+infer_eval/utils.DatasetConfig.seed = 42
+infer_eval/utils.DatasetConfig.shuffle = False
+infer_eval/utils.DatasetConfig.split = 'validation'
+infer_eval/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
+infer_eval/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
+
+# Parameters for train/utils.DatasetConfig:
+# ==============================================================================
+train/utils.DatasetConfig.batch_size = %BATCH_SIZE
+train/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
+train/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
+train/utils.DatasetConfig.pack = True
+train/utils.DatasetConfig.seed = None
+train/utils.DatasetConfig.shuffle = True
+train/utils.DatasetConfig.split = 'train'
+train/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
+train/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
+
+# Parameters for train_eval/utils.DatasetConfig:
+# ==============================================================================
+train_eval/utils.DatasetConfig.batch_size = %BATCH_SIZE
+train_eval/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
+train_eval/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
+train_eval/utils.DatasetConfig.pack = True
+train_eval/utils.DatasetConfig.seed = 42
+train_eval/utils.DatasetConfig.shuffle = False
+train_eval/utils.DatasetConfig.split = 'validation'
+train_eval/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
+train_eval/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
+
+# Parameters for models.EncoderDecoderModel:
+# ==============================================================================
+models.EncoderDecoderModel.input_vocabulary = %VOCABULARY
+models.EncoderDecoderModel.label_smoothing = %LABEL_SMOOTHING
+models.EncoderDecoderModel.loss_normalizing_factor = %LOSS_NORMALIZING_FACTOR
+models.EncoderDecoderModel.module = @network.Transformer()
+models.EncoderDecoderModel.optimizer_def = %OPTIMIZER
+models.EncoderDecoderModel.output_vocabulary = %VOCABULARY
+models.EncoderDecoderModel.z_loss = %Z_LOSS
+
+# Parameters for seqio.Evaluator:
+# ==============================================================================
+seqio.Evaluator.logger_cls = \
+    [@seqio.PyLoggingLogger, @seqio.TensorBoardLogger, @seqio.JSONLogger]
+seqio.Evaluator.num_examples = %EVALUATOR_NUM_EXAMPLES
+seqio.Evaluator.use_memory_cache = %EVALUATOR_USE_MEMORY_CACHE
+
+# Parameters for seqio.JSONLogger:
+# ==============================================================================
+seqio.JSONLogger.write_n_results = %JSON_WRITE_N_RESULTS
+
+# Parameters for partitioning.PjitPartitioner:
+# ==============================================================================
+partitioning.PjitPartitioner.logical_axis_rules = \
+    @partitioning.standard_logical_axis_rules()
+partitioning.PjitPartitioner.model_parallel_submesh = None
+partitioning.PjitPartitioner.num_partitions = 1
+
+# Parameters for utils.RestoreCheckpointConfig:
+# ==============================================================================
+utils.RestoreCheckpointConfig.dtype = 'float32'
+utils.RestoreCheckpointConfig.mode = 'specific'
+utils.RestoreCheckpointConfig.path = %INITIAL_CHECKPOINT_PATH
+
+# Parameters for utils.SaveCheckpointConfig:
+# ==============================================================================
+utils.SaveCheckpointConfig.dtype = 'float32'
+utils.SaveCheckpointConfig.keep = None
+utils.SaveCheckpointConfig.period = 1000
+utils.SaveCheckpointConfig.save_dataset = False
+
+# Parameters for seqio.SentencePieceVocabulary:
+# ==============================================================================
+seqio.SentencePieceVocabulary.sentencepiece_model_file = \
+    'gs://t5-data/vocabs/mc4.250000.100extra/sentencepiece.model'
+
+# Parameters for network.T5Config:
+# ==============================================================================
+network.T5Config.dropout_rate = %DROPOUT_RATE
+network.T5Config.dtype = 'bfloat16'
+network.T5Config.emb_dim = 512
+network.T5Config.head_dim = 64
+network.T5Config.logits_via_embedding = False
+network.T5Config.mlp_activations = ('gelu', 'linear')
+network.T5Config.mlp_dim = 1024
+network.T5Config.num_decoder_layers = 8
+network.T5Config.num_encoder_layers = 8
+network.T5Config.num_heads = 6
+network.T5Config.vocab_size = 250112
+
+# Parameters for train_script.train:
+# ==============================================================================
+train_script.train.checkpoint_cfg = @utils.CheckpointConfig()
+train_script.train.eval_period = %EVAL_PERIOD
+train_script.train.eval_steps = %EVAL_STEPS
+train_script.train.infer_eval_dataset_cfg = @infer_eval/utils.DatasetConfig()
+train_script.train.inference_evaluator_cls = @seqio.Evaluator
+train_script.train.model = %MODEL
+train_script.train.model_dir = %MODEL_DIR
+train_script.train.partitioner = @partitioning.PjitPartitioner()
+train_script.train.random_seed = %RANDOM_SEED
+train_script.train.summarize_config_fn = @gin_utils.summarize_gin_config
+train_script.train.total_steps = %TRAIN_STEPS
+train_script.train.train_dataset_cfg = @train/utils.DatasetConfig()
+train_script.train.train_eval_dataset_cfg = @train_eval/utils.DatasetConfig()
+train_script.train.trainer_cls = @trainer.Trainer
+train_script.train.use_hardware_rng = %USE_HARDWARE_RNG
+
+# Parameters for trainer.Trainer:
+# ==============================================================================
+trainer.Trainer.learning_rate_fn = @utils.create_learning_rate_scheduler()
+trainer.Trainer.num_microbatches = None
+
+# Parameters for network.Transformer:
+# ==============================================================================
+network.Transformer.config = @network.T5Config()
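config.gin is the full operative configuration that t5x dumps at training time: a small encoder-decoder (8+8 layers, d_model 512, the 250k mC4 SentencePiece vocabulary) fine-tuned on the 'translate_long' mixture from checkpoint 1700000 for 5,000 additional steps (TRAIN_STEPS 1705000). A hedged sketch of how such a run is typically launched; the exact flags depend on the installed t5x version, and a local tasks.py must register the mixture:

# Launch sketch: t5x reads this gin file via its train entry point.
# Macro values can be overridden per run with --gin.NAME=VALUE flags;
# the values shown are the ones recorded in config.gin, not suggestions.
import subprocess

subprocess.run([
    "python", "-m", "t5x.train",
    "--gin_file=config.gin",
    "--gin.TRAIN_STEPS=1705000",
    "--gin.MODEL_DIR='gs://nb-t5x-us-central2/finetuned/nynorsk_balanced_small_long_v1'",
], check=True)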
config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "/home/perk/models/nynorsk_North_small_long",
+  "architectures": [
+    "T5ForConditionalGeneration"
+  ],
+  "d_ff": 1024,
+  "d_kv": 64,
+  "d_model": 512,
+  "decoder_start_token_id": 0,
+  "dropout_rate": 0.1,
+  "eos_token_id": 1,
+  "feed_forward_proj": "gated-gelu",
+  "initializer_factor": 1.0,
+  "is_encoder_decoder": true,
+  "layer_norm_epsilon": 1e-06,
+  "model_type": "t5",
+  "num_decoder_layers": 8,
+  "num_heads": 6,
+  "num_layers": 8,
+  "pad_token_id": 0,
+  "relative_attention_max_distance": 128,
+  "relative_attention_num_buckets": 32,
+  "tie_word_embeddings": false,
+  "tokenizer_class": "T5Tokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.19.2",
+  "use_cache": true,
+  "vocab_size": 250112
+}
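config.json makes the same checkpoint loadable through Hugging Face transformers, with the architecture and tokenizer classes named above. A minimal inference sketch; "<this-repo-id>" is a placeholder for the model's Hub id or a local clone, and the sample text is illustrative:

# Minimal usage sketch with the classes declared in config.json.
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("<this-repo-id>")
model = T5ForConditionalGeneration.from_pretrained("<this-repo-id>")

# config.gin caps inputs and targets at 512 tokens for this task.
inputs = tokenizer("Dette er ein test.", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=512)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))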
flax_model.msgpack
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2092700b78ce60d7724b82df50270465bc60ca219a8fa493afca4614b0f7f0f8
+size 1200715307
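This file, like pytorch_model.bin, spiece.model, tokenizer.json, and the event files below, is committed as a git-LFS pointer: three key/value lines giving the spec version, the SHA-256 of the real content, and its size in bytes. A small parsing sketch, assuming the pointer has not yet been replaced by `git lfs pull`:

# Parse a git-LFS pointer file; each line is "key value".
def parse_lfs_pointer(path):
    with open(path, encoding="utf-8") as fh:
        return dict(line.rstrip("\n").split(" ", 1) for line in fh if line.strip())

pointer = parse_lfs_pointer("flax_model.msgpack")
print(pointer["oid"])   # sha256:2092700b78ce...
print(pointer["size"])  # 1200715307 bytes, roughly 1.2 GB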
model-info.txt
ADDED
The diff for this file is too large to render; see the raw diff in the repository.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:655f0675e665435d2f58ac71a60c2c2aca5c47a216d45051a3b7ad205d861baf
+size 1200768197
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
spiece.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+size 4309802
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93c3578052e1605d8332eb961bc08d72e246071974e4cc54aa6991826b802aa5
+size 16330369
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 0, "additional_special_tokens": null, "special_tokens_map_file": "/home/patrick/.cache/torch/transformers/685ac0ca8568ec593a48b61b0a3c272beee9bc194a3c7241d15dcadb5f875e53.f76030f3ec1b96a8199b2593390c610e76ca8028ef3d24680000619ffb646276", "name_or_path": "/home/perk/models/nynorsk_North_small_long", "sp_model_kwargs": {}, "tokenizer_class": "T5Tokenizer"}
train/events.out.tfevents.1673617845.t1v-n-a0e4cb08-w-0.1682318.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6945fb66a975ab069ae3f6a35efc3cf2eb42fcf0b13f1609bf4f7d3cadaf29c2
+size 9938386
training_eval/translate_long/events.out.tfevents.1673617845.t1v-n-a0e4cb08-w-0.1682318.1.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5d4c628d312b8bfbcdbc286f0b4bdae357118fbf9b8f8429874e4989907bebe
+size 8921
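These two event files hold the TensorBoard curves for the run: training scalars under train/ and 'translate_long' eval metrics under training_eval/. After `git lfs pull`, they can be read with the standard TensorBoard event reader. A sketch, assuming the tensorboard package is available; the scalar tag names depend on what t5x logged:

# Sketch: list and read scalar curves from the committed event files.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("train")  # directory containing events.out.tfevents.*
acc.Reload()

for tag in acc.Tags()["scalars"]:
    events = acc.Scalars(tag)
    print(tag, "->", events[-1].step, events[-1].value)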