sihuapeng committed
Commit 200ecb9 · verified · 1 Parent(s): 109ce89

Upload 6 files

config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "_name_or_path": "facebook/esm2_t33_650M_UR50D",
+ "architectures": [
+ "DualEsmForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout": 0.4,
+ "emb_layer_norm_before": false,
+ "esmfold_config": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 1280,
+ "initializer_range": 0.02,
+ "intermediate_size": 5120,
+ "is_folding_model": false,
+ "layer_norm_eps": 1e-05,
+ "mask_token_id": 32,
+ "max_position_embeddings": 1026,
+ "model_type": "esm",
+ "num_attention_heads": 20,
+ "num_hidden_layers": 33,
+ "pad_token_id": 1,
+ "position_embedding_type": "rotary",
+ "token_dropout": true,
+ "torch_dtype": "float16",
+ "transformers_version": "4.49.0",
+ "use_cache": true,
+ "vocab_list": null,
+ "vocab_size": 33
+ }
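
The config above records the ESM-2 650M backbone (33 layers, hidden size 1280, rotary position embeddings, float16 weights) behind the custom DualEsmForSequenceClassification head. A minimal inspection sketch, assuming the repo files have been downloaded to a local directory (the path is an assumption; the Dual* class is not part of transformers, so only the config is loaded here):

```python
# Minimal sketch, assuming this repo's files sit in ./esm2_t33_650M_UR50D-dual-seq
# (local path is an assumption; DualEsmForSequenceClassification is a custom class
# defined outside transformers, so only the config is inspected here).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./esm2_t33_650M_UR50D-dual-seq")

print(config.model_type)         # "esm"
print(config.hidden_size)        # 1280
print(config.num_hidden_layers)  # 33
print(config.architectures)      # ["DualEsmForSequenceClassification"]
```
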
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d9aa6b79c4bb157430c0fc83c77a5605ae9bda663c5e5768aa2f89cc0716fda
+ size 1308059038
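
Only the Git LFS pointer is committed here; the actual ~1.3 GB of float16 weights are stored in LFS under the SHA-256 object id above. A minimal sketch for checking a downloaded model.safetensors against the pointer (the local file path is an assumption):

```python
# Minimal sketch: verify a downloaded model.safetensors against the LFS pointer above.
# The local file path is an assumption.
import hashlib
import os

EXPECTED_OID = "1d9aa6b79c4bb157430c0fc83c77a5605ae9bda663c5e5768aa2f89cc0716fda"
EXPECTED_SIZE = 1308059038

path = "model.safetensors"
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(os.path.getsize(path) == EXPECTED_SIZE)
print(sha.hexdigest() == EXPECTED_OID)
```
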
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "<cls>",
+ "eos_token": "<eos>",
+ "mask_token": "<mask>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<cls>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<eos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32": {
+ "content": "<mask>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "<cls>",
+ "eos_token": "<eos>",
+ "extra_special_tokens": {},
+ "mask_token": "<mask>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "tokenizer_class": "EsmTokenizer",
+ "unk_token": "<unk>"
+ }
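
tokenizer_config.json names EsmTokenizer and, together with special_tokens_map.json and vocab.txt, fully defines the tokenizer. A minimal loading sketch, assuming these files sit in the working directory (the path is an assumption):

```python
# Minimal sketch, assuming the tokenizer files above are in the current directory.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # resolves to EsmTokenizer via tokenizer_config.json

enc = tok("MKTAYIAKQR")  # one token per residue, wrapped in <cls> ... <eos>
print(enc["input_ids"])
print(tok.cls_token_id, tok.pad_token_id, tok.mask_token_id)  # 0 1 32
```
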
training_args.json ADDED
@@ -0,0 +1,147 @@
+ {
+ "output_dir": "esm2_t33_650M_UR50D-dual-seq",
+ "overwrite_output_dir": false,
+ "do_train": true,
+ "do_eval": true,
+ "do_predict": false,
+ "eval_strategy": "epoch",
+ "prediction_loss_only": false,
+ "per_device_train_batch_size": 48,
+ "per_device_eval_batch_size": 48,
+ "per_gpu_train_batch_size": null,
+ "per_gpu_eval_batch_size": null,
+ "gradient_accumulation_steps": 1,
+ "eval_accumulation_steps": null,
+ "eval_delay": 0,
+ "torch_empty_cache_steps": null,
+ "learning_rate": 1e-05,
+ "weight_decay": 0.01,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.999,
+ "adam_epsilon": 1e-08,
+ "max_grad_norm": 1.0,
+ "num_train_epochs": 33,
+ "max_steps": -1,
+ "lr_scheduler_type": "cosine",
+ "lr_scheduler_kwargs": {},
+ "warmup_ratio": 0.0,
+ "warmup_steps": 1000,
+ "log_level": "passive",
+ "log_level_replica": "warning",
+ "log_on_each_node": true,
+ "logging_dir": "./logs-esm2-dual-650M",
+ "logging_strategy": "steps",
+ "logging_first_step": false,
+ "logging_steps": 100,
+ "logging_nan_inf_filter": true,
+ "save_strategy": "epoch",
+ "save_steps": 500,
+ "save_total_limit": null,
+ "save_safetensors": true,
+ "save_on_each_node": false,
+ "save_only_model": false,
+ "restore_callback_states_from_checkpoint": false,
+ "no_cuda": false,
+ "use_cpu": false,
+ "use_mps_device": false,
+ "seed": 42,
+ "data_seed": null,
+ "jit_mode_eval": false,
+ "use_ipex": false,
+ "bf16": false,
+ "fp16": true,
+ "fp16_opt_level": "O1",
+ "half_precision_backend": "auto",
+ "bf16_full_eval": false,
+ "fp16_full_eval": false,
+ "tf32": null,
+ "local_rank": 0,
+ "ddp_backend": null,
+ "tpu_num_cores": null,
+ "tpu_metrics_debug": false,
+ "debug": [],
+ "dataloader_drop_last": false,
+ "eval_steps": null,
+ "dataloader_num_workers": 0,
+ "dataloader_prefetch_factor": null,
+ "past_index": -1,
+ "run_name": "esm2_t33_650M_UR50D-dual-seq",
+ "disable_tqdm": false,
+ "remove_unused_columns": true,
+ "label_names": [
+ "labels"
+ ],
+ "load_best_model_at_end": true,
+ "metric_for_best_model": "eval_accuracy",
+ "greater_is_better": true,
+ "ignore_data_skip": false,
+ "fsdp": [],
+ "fsdp_min_num_params": 0,
+ "fsdp_config": {
+ "min_num_params": 0,
+ "xla": false,
+ "xla_fsdp_v2": false,
+ "xla_fsdp_grad_ckpt": false
+ },
+ "fsdp_transformer_layer_cls_to_wrap": null,
+ "accelerator_config": "AcceleratorConfig(split_batches=False, dispatch_batches=None, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False)",
+ "deepspeed": "ds_config.json",
+ "label_smoothing_factor": 0.0,
+ "optim": "adamw_torch",
+ "optim_args": null,
+ "adafactor": false,
+ "group_by_length": false,
+ "length_column_name": "length",
+ "report_to": [
+ "tensorboard"
+ ],
+ "ddp_find_unused_parameters": null,
+ "ddp_bucket_cap_mb": null,
+ "ddp_broadcast_buffers": null,
+ "dataloader_pin_memory": true,
+ "dataloader_persistent_workers": false,
+ "skip_memory_metrics": true,
+ "use_legacy_prediction_loop": false,
+ "push_to_hub": false,
+ "resume_from_checkpoint": null,
+ "hub_model_id": null,
+ "hub_strategy": "every_save",
+ "hub_token": null,
+ "hub_private_repo": null,
+ "hub_always_push": false,
+ "gradient_checkpointing": false,
+ "gradient_checkpointing_kwargs": null,
+ "include_inputs_for_metrics": false,
+ "include_for_metrics": [],
+ "eval_do_concat_batches": true,
+ "fp16_backend": "auto",
+ "evaluation_strategy": null,
+ "push_to_hub_model_id": null,
+ "push_to_hub_organization": null,
+ "push_to_hub_token": null,
+ "mp_parameters": "",
+ "auto_find_batch_size": false,
+ "full_determinism": false,
+ "torchdynamo": null,
+ "ray_scope": "last",
+ "ddp_timeout": 1800,
+ "torch_compile": false,
+ "torch_compile_backend": null,
+ "torch_compile_mode": null,
+ "dispatch_batches": null,
+ "split_batches": null,
+ "include_tokens_per_second": false,
+ "include_num_input_tokens_seen": false,
+ "neftune_noise_alpha": null,
+ "optim_target_modules": null,
+ "batch_eval_metrics": false,
+ "eval_on_start": false,
+ "use_liger_kernel": false,
+ "eval_use_gather_object": false,
+ "average_tokens_across_devices": false,
+ "distributed_state": "Distributed environment: DEEPSPEED Backend: nccl\nNum processes: 4\nProcess index: 0\nLocal process index: 0\nDevice: cuda:0\n",
+ "_n_gpu": 1,
+ "__cached__setup_devices": "cuda:0",
+ "deepspeed_plugin": "DeepSpeedPlugin(hf_ds_config=<transformers.integrations.deepspeed.HfTrainerDeepSpeedConfig object at 0x1541fdc514e0>, gradient_accumulation_steps=1, gradient_clipping=1.0, zero_stage=2, is_train_batch_min=True, offload_optimizer_device='none', offload_param_device='none', offload_optimizer_nvme_path='none', offload_param_nvme_path='none', zero3_init_flag=False, zero3_save_16bit_model=False, transformer_moe_cls_names=None)",
+ "hf_deepspeed_config": "<transformers.integrations.deepspeed.HfTrainerDeepSpeedConfig object at 0x1541fdc514e0>"
+ }
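
training_args.json is the serialized TrainingArguments from the fine-tuning run: DeepSpeed ZeRO-2 across 4 processes, fp16, a cosine schedule with 1000 warmup steps, 33 epochs at a 1e-05 learning rate, and epoch-level eval/save selecting the best checkpoint by eval_accuracy. A minimal sketch for pulling out the headline hyperparameters and the effective train batch size implied by the values above (48 per device × 4 processes × 1 accumulation step = 192); the working-directory path is an assumption:

```python
# Minimal sketch, assuming training_args.json is in the working directory.
import json

with open("training_args.json") as f:
    args = json.load(f)

NUM_PROCESSES = 4  # taken from the "distributed_state" field above

effective_batch = (
    args["per_device_train_batch_size"]
    * args["gradient_accumulation_steps"]
    * NUM_PROCESSES
)

print(args["learning_rate"], args["num_train_epochs"], args["lr_scheduler_type"])
print("effective train batch size:", effective_batch)  # 48 * 1 * 4 = 192
```
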
vocab.txt ADDED
@@ -0,0 +1,33 @@
+ <cls>
+ <pad>
+ <eos>
+ <unk>
+ L
+ A
+ G
+ V
+ S
+ E
+ R
+ T
+ I
+ D
+ P
+ K
+ Q
+ N
+ F
+ Y
+ M
+ H
+ W
+ C
+ X
+ B
+ U
+ Z
+ O
+ .
+ -
+ <null_1>
+ <mask>
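
vocab.txt lists the 33 ESM-2 tokens one per line in id order, so the special-token positions line up with pad_token_id = 1, mask_token_id = 32, and vocab_size = 33 in config.json. A small sketch that rebuilds the token-to-id map (the local path is an assumption):

```python
# Minimal sketch, assuming vocab.txt is in the working directory.
with open("vocab.txt") as f:
    vocab = [line.rstrip("\n") for line in f]

token_to_id = {tok: i for i, tok in enumerate(vocab)}

print(len(vocab))             # 33, matching vocab_size in config.json
print(token_to_id["<pad>"])   # 1,  matching pad_token_id
print(token_to_id["<mask>"])  # 32, matching mask_token_id
```
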