kbwaaablya committed on
Commit 4fcae93 · verified · 1 Parent(s): 60d052b

Upload folder using huggingface_hub
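For context, a commit with this message is typically produced by the `upload_folder` helper in `huggingface_hub`. A minimal sketch, assuming you are logged in; the repo id below is a hypothetical placeholder, not this repository's actual name:

```python
# Sketch: pushing a local training-output folder to the Hub in one commit.
# "your-username/opus-mt-en-lv-finetuned" is a hypothetical repo id.
from huggingface_hub import HfApi

api = HfApi()  # assumes prior authentication, e.g. `huggingface-cli login`
api.upload_folder(
    folder_path="./outputs",  # local folder containing checkpoints, tokenizer files, model weights
    repo_id="your-username/opus-mt-en-lv-finetuned",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```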
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/source.spm filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/target.spm filter=lfs diff=lfs merge=lfs -text
+ checkpoint-729/source.spm filter=lfs diff=lfs merge=lfs -text
+ checkpoint-729/target.spm filter=lfs diff=lfs merge=lfs -text
+ source.spm filter=lfs diff=lfs merge=lfs -text
+ target.spm filter=lfs diff=lfs merge=lfs -text
checkpoint-500/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "MarianMTModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "classifier_dropout": 0.0,
+ "d_model": 1024,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 4096,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 58297,
+ "decoder_vocab_size": 58298,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 4096,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 45603,
+ "forced_eos_token_id": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": null,
+ "max_position_embeddings": 1024,
+ "model_type": "marian",
+ "normalize_embedding": false,
+ "num_beams": null,
+ "num_hidden_layers": 6,
+ "pad_token_id": 58297,
+ "scale_embedding": true,
+ "share_encoder_decoder_embeddings": true,
+ "static_position_embeddings": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.50.3",
+ "use_cache": true,
+ "vocab_size": 58298
+ }
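The config describes a standard Marian encoder-decoder (6 encoder and 6 decoder layers, d_model 1024, shared embeddings over a 58,298-token vocabulary). A minimal sketch of loading this intermediate checkpoint with transformers, assuming the repository has been cloned or downloaded locally so the folder layout matches this commit:

```python
# Sketch: loading the step-500 checkpoint from a local copy of the repo.
from transformers import MarianConfig, MarianMTModel

config = MarianConfig.from_pretrained("./checkpoint-500")
print(config.d_model, config.encoder_layers, config.vocab_size)  # 1024 6 58298

model = MarianMTModel.from_pretrained("./checkpoint-500")  # reads model.safetensors
# Parameter count on the order of 236M, consistent with the ~944 MB float32 weight file.
print(sum(p.numel() for p in model.parameters()))
```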
checkpoint-500/generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "bad_words_ids": [
+ [
+ 58297
+ ]
+ ],
+ "bos_token_id": 0,
+ "decoder_start_token_id": 58297,
+ "eos_token_id": 45603,
+ "forced_eos_token_id": 45603,
+ "max_length": 512,
+ "num_beams": 4,
+ "pad_token_id": 58297,
+ "renormalize_logits": true,
+ "transformers_version": "4.50.3"
+ }
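These values become the checkpoint's default decoding settings: beam search with 4 beams, a 512-token cap, the pad token (58297) banned via `bad_words_ids`, and a forced `</s>` at the end of generation. A hedged sketch of how they surface through `GenerationConfig`:

```python
# Sketch: the saved generation_config.json is loaded as model.generation_config,
# so generate() picks these defaults up without extra arguments.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./checkpoint-500")
print(gen_cfg.num_beams, gen_cfg.max_length, gen_cfg.bad_words_ids)  # 4 512 [[58297]]

# Any default can still be overridden per call, e.g.:
# outputs = model.generate(**inputs, num_beams=1, max_new_tokens=64)
```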
checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee650e4b0511d0e6c833c1c2b6dfa787f249add60f9af2dd63f3f0eb8e29f761
+ size 944480920
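The diff shows the Git LFS pointer rather than the weights themselves: `oid` is the SHA-256 of the real file and `size` is its byte count. A small sketch of verifying a downloaded file against this pointer (values taken from the entry above):

```python
# Sketch: check that a downloaded model.safetensors matches its LFS pointer.
import hashlib
import os

expected_oid = "ee650e4b0511d0e6c833c1c2b6dfa787f249add60f9af2dd63f3f0eb8e29f761"
expected_size = 944480920
path = "checkpoint-500/model.safetensors"  # assumes the resolved file, not the pointer text

assert os.path.getsize(path) == expected_size

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid
```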
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c02b58fa79aa9053d7b24bafb759a333657c4a927bd3e1146d0f7922345ecd57
+ size 1888647802
checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8bd4f887a4dfc2ec04e88b27e2f24434961434651e5b601db63a08fa64cd06a
+ size 14244
checkpoint-500/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bfa51e071ccffcbabecfa11f208d56e0863809bdd9bf20f7e79e215c2919b42
+ size 988
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0531c4728970fe3718b0e2aea4a9f23f7f7069a53976bf37f68f21417c50bce
+ size 1064
checkpoint-500/source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94634cc0b00a2a1e0e2e4317cc0a49d0154d835f7951425e45be15c72ba21417
+ size 798014
checkpoint-500/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
checkpoint-500/target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25446f415fe316afbf692acf7ef5131a74940dc820e2cbe31300f09ac3c492dc
+ size 838711
checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "added_tokens_decoder": {
+ "45603": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "54359": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "58297": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "model_max_length": 512,
+ "pad_token": "<pad>",
+ "separate_vocabs": false,
+ "source_lang": "en",
+ "sp_model_kwargs": {},
+ "target_lang": "lv",
+ "tokenizer_class": "MarianTokenizer",
+ "unk_token": "<unk>"
+ }
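The tokenizer is a MarianTokenizer configured for English source and Latvian target text, with `</s>`, `<unk>`, and `<pad>` as the only special tokens. A minimal sketch of using it from this checkpoint folder (the example sentences are arbitrary):

```python
# Sketch: tokenizing English input (and Latvian targets) with the saved tokenizer.
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("./checkpoint-500")  # uses source.spm, target.spm, vocab.json
enc = tokenizer("A short English sentence.", return_tensors="pt")
print(enc["input_ids"])  # sequence ends with the </s> id (45603)

# Target-side text is encoded with the target SentencePiece model:
labels = tokenizer(text_target="Īss teikums latviski.", return_tensors="pt")
```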
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.05761316872428,
+ "eval_steps": 500,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_loss": 1.3566852807998657,
+ "eval_runtime": 0.7082,
+ "eval_samples_per_second": 611.395,
+ "eval_steps_per_second": 77.66,
+ "step": 243
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 1.2682921886444092,
+ "eval_runtime": 0.6768,
+ "eval_samples_per_second": 639.761,
+ "eval_steps_per_second": 81.263,
+ "step": 486
+ },
+ {
+ "epoch": 2.05761316872428,
+ "grad_norm": 9.423436164855957,
+ "learning_rate": 6.3374485596707825e-06,
+ "loss": 1.3704,
+ "step": 500
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 729,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 109302069067776.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
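trainer_state.json records the evaluation history (eval_loss 1.357 after epoch 1, 1.268 after epoch 2) and the point at which this checkpoint was saved: step 500 of 729, i.e. roughly epoch 2.06 with 243 optimizer steps per epoch. A small sketch of pulling those numbers back out:

```python
# Sketch: reading the saved Trainer state to inspect the eval-loss curve.
import json

with open("checkpoint-500/trainer_state.json") as f:
    state = json.load(f)

evals = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(evals)                                           # [(1.0, 1.3566...), (2.0, 1.2682...)]
print(state["global_step"], "/", state["max_steps"])   # 500 / 729
```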
checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1c563b600c69d2804728e9cfb80935191961a93c9761c2af118e10f951e12ea
+ size 5496
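training_args.bin is a pickled TrainingArguments object rather than a tensor file. If you trust the repository it can be inspected as below; this is a hedged sketch, and `weights_only=False` is required precisely because the file is a pickle, which means arbitrary code can run when loading untrusted files:

```python
# Sketch: inspecting the pickled TrainingArguments saved by Trainer.
# Only unpickle files from repositories you trust.
import torch

args = torch.load("checkpoint-500/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.num_train_epochs, args.learning_rate)
```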
checkpoint-500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-729/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "MarianMTModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "classifier_dropout": 0.0,
+ "d_model": 1024,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 4096,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 58297,
+ "decoder_vocab_size": 58298,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 4096,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 45603,
+ "forced_eos_token_id": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": null,
+ "max_position_embeddings": 1024,
+ "model_type": "marian",
+ "normalize_embedding": false,
+ "num_beams": null,
+ "num_hidden_layers": 6,
+ "pad_token_id": 58297,
+ "scale_embedding": true,
+ "share_encoder_decoder_embeddings": true,
+ "static_position_embeddings": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.50.3",
+ "use_cache": true,
+ "vocab_size": 58298
+ }
checkpoint-729/generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "bad_words_ids": [
+ [
+ 58297
+ ]
+ ],
+ "bos_token_id": 0,
+ "decoder_start_token_id": 58297,
+ "eos_token_id": 45603,
+ "forced_eos_token_id": 45603,
+ "max_length": 512,
+ "num_beams": 4,
+ "pad_token_id": 58297,
+ "renormalize_logits": true,
+ "transformers_version": "4.50.3"
+ }
checkpoint-729/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91c70b56aa595b23fba00656eb7563993fe0e2ce84c6ef412ac70c27b876fb44
+ size 944480920
checkpoint-729/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbe389f2ec3a56a06e3783bb4c0a3e45b8a4e3d6feaac7caaf406bd9b65de030
+ size 1888647802
checkpoint-729/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c646261de0de4d9659cfc4dc149e116806085fd03be89858296eef087a55b532
+ size 14244
checkpoint-729/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c16a360deedb9188983d768b70e906d28bbb3555fd7a93647015ad5f43f2ccdb
+ size 988
checkpoint-729/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65ca8c525c485e92210800dd551db2d064ae323799a3c9dd02921f2b56752fe7
+ size 1064
checkpoint-729/source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94634cc0b00a2a1e0e2e4317cc0a49d0154d835f7951425e45be15c72ba21417
+ size 798014
checkpoint-729/special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
checkpoint-729/target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25446f415fe316afbf692acf7ef5131a74940dc820e2cbe31300f09ac3c492dc
+ size 838711
checkpoint-729/tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "added_tokens_decoder": {
+ "45603": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "54359": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "58297": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "model_max_length": 512,
+ "pad_token": "<pad>",
+ "separate_vocabs": false,
+ "source_lang": "en",
+ "sp_model_kwargs": {},
+ "target_lang": "lv",
+ "tokenizer_class": "MarianTokenizer",
+ "unk_token": "<unk>"
+ }
checkpoint-729/trainer_state.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 729,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_loss": 1.3566852807998657,
+ "eval_runtime": 0.7082,
+ "eval_samples_per_second": 611.395,
+ "eval_steps_per_second": 77.66,
+ "step": 243
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 1.2682921886444092,
+ "eval_runtime": 0.6768,
+ "eval_samples_per_second": 639.761,
+ "eval_steps_per_second": 81.263,
+ "step": 486
+ },
+ {
+ "epoch": 2.05761316872428,
+ "grad_norm": 9.423436164855957,
+ "learning_rate": 6.3374485596707825e-06,
+ "loss": 1.3704,
+ "step": 500
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 729,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 159331129491456.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-729/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1c563b600c69d2804728e9cfb80935191961a93c9761c2af118e10f951e12ea
+ size 5496
checkpoint-729/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "MarianMTModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "classifier_dropout": 0.0,
+ "d_model": 1024,
+ "decoder_attention_heads": 16,
+ "decoder_ffn_dim": 4096,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 58297,
+ "decoder_vocab_size": 58298,
+ "dropout": 0.1,
+ "encoder_attention_heads": 16,
+ "encoder_ffn_dim": 4096,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 45603,
+ "forced_eos_token_id": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": null,
+ "max_position_embeddings": 1024,
+ "model_type": "marian",
+ "normalize_embedding": false,
+ "num_beams": null,
+ "num_hidden_layers": 6,
+ "pad_token_id": 58297,
+ "scale_embedding": true,
+ "share_encoder_decoder_embeddings": true,
+ "static_position_embeddings": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.50.3",
+ "use_cache": true,
+ "vocab_size": 58298
+ }
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "bad_words_ids": [
+ [
+ 58297
+ ]
+ ],
+ "bos_token_id": 0,
+ "decoder_start_token_id": 58297,
+ "eos_token_id": 45603,
+ "forced_eos_token_id": 45603,
+ "max_length": 512,
+ "num_beams": 4,
+ "pad_token_id": 58297,
+ "renormalize_logits": true,
+ "transformers_version": "4.50.3"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91c70b56aa595b23fba00656eb7563993fe0e2ce84c6ef412ac70c27b876fb44
+ size 944480920
source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94634cc0b00a2a1e0e2e4317cc0a49d0154d835f7951425e45be15c72ba21417
+ size 798014
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25446f415fe316afbf692acf7ef5131a74940dc820e2cbe31300f09ac3c492dc
+ size 838711
tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "added_tokens_decoder": {
+ "45603": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "54359": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "58297": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "model_max_length": 512,
+ "pad_token": "<pad>",
+ "separate_vocabs": false,
+ "source_lang": "en",
+ "sp_model_kwargs": {},
+ "target_lang": "lv",
+ "tokenizer_class": "MarianTokenizer",
+ "unk_token": "<unk>"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1c563b600c69d2804728e9cfb80935191961a93c9761c2af118e10f951e12ea
+ size 5496
vocab.json ADDED
The diff for this file is too large to render. See raw diff
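The root-level files mirror checkpoint-729 (the model.safetensors oid is identical), so the repository root can be used directly as an English→Latvian translation model. A minimal end-to-end sketch; the repo id is a hypothetical placeholder for this repository's actual name:

```python
# Sketch: translating English to Latvian with the uploaded model.
# "your-username/opus-mt-en-lv-finetuned" is a hypothetical repo id.
from transformers import MarianMTModel, MarianTokenizer

repo = "your-username/opus-mt-en-lv-finetuned"
tokenizer = MarianTokenizer.from_pretrained(repo)
model = MarianMTModel.from_pretrained(repo)

batch = tokenizer(["The weather is nice today."], return_tensors="pt")
generated = model.generate(**batch)  # beam-search defaults come from generation_config.json
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```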