lhallee committed (verified) · Commit b84f03a · 1 Parent(s): 15db44a

Upload FastEsmForProteinFolding

config.json ADDED
@@ -0,0 +1,117 @@
+ {
+   "architectures": [
+     "FastEsmForProteinFolding"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "attn_backend": "sdpa",
+   "auto_map": {
+     "AutoConfig": "modeling_fast_esmfold.FastEsmFoldConfig",
+     "AutoModel": "modeling_fast_esmfold.FastEsmForProteinFolding"
+   },
+   "classifier_dropout": null,
+   "dtype": "float32",
+   "emb_layer_norm_before": false,
+   "esmfold_config": {
+     "bypass_lm": false,
+     "embed_aa": true,
+     "esm_ablate_pairwise": false,
+     "esm_ablate_sequence": false,
+     "esm_input_dropout": 0,
+     "esm_type": "esm2_3B",
+     "fp16_esm": false,
+     "lddt_head_hid_dim": 128,
+     "trunk": {
+       "chunk_size": null,
+       "cpu_grad_checkpoint": false,
+       "dropout": 0,
+       "layer_drop": 0,
+       "max_recycles": 4,
+       "num_blocks": 48,
+       "pairwise_head_width": 32,
+       "pairwise_state_dim": 128,
+       "position_bins": 32,
+       "sequence_head_width": 32,
+       "sequence_state_dim": 1024,
+       "structure_module": {
+         "dropout_rate": 0.1,
+         "epsilon": 1e-08,
+         "inf": 100000.0,
+         "ipa_dim": 16,
+         "num_angles": 7,
+         "num_blocks": 8,
+         "num_heads_ipa": 12,
+         "num_qk_points": 4,
+         "num_resnet_blocks": 2,
+         "num_transition_layers": 1,
+         "num_v_points": 8,
+         "pairwise_dim": 128,
+         "resnet_dim": 128,
+         "sequence_dim": 384,
+         "trans_scale_factor": 10
+       }
+     },
+     "use_esm_attn_map": false
+   },
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 2560,
+   "initializer_range": 0.02,
+   "intermediate_size": 10240,
+   "is_folding_model": true,
+   "layer_norm_eps": 1e-05,
+   "mask_token_id": 32,
+   "max_position_embeddings": 1026,
+   "model_type": "fast_esmfold",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 36,
+   "pad_token_id": 1,
+   "position_embedding_type": "rotary",
+   "token_dropout": true,
+   "transformers_version": "4.57.0",
+   "ttt_config": {
+     "ags": 4,
+     "batch_size": 4,
+     "lora_alpha": 32.0,
+     "lora_rank": 8,
+     "lr": 0.0004,
+     "mask_ratio": 0.15,
+     "steps": 30
+   },
+   "use_cache": true,
+   "vocab_list": [
+     "<cls>",
+     "<pad>",
+     "<eos>",
+     "<unk>",
+     "L",
+     "A",
+     "G",
+     "V",
+     "S",
+     "E",
+     "R",
+     "T",
+     "I",
+     "D",
+     "P",
+     "K",
+     "Q",
+     "N",
+     "F",
+     "Y",
+     "M",
+     "H",
+     "W",
+     "C",
+     "X",
+     "B",
+     "U",
+     "Z",
+     "O",
+     ".",
+     "-",
+     "<null_1>",
+     "<mask>"
+   ],
+   "vocab_size": 33
+ }
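A minimal sketch of loading this checkpoint through the `auto_map` above. The repo id used below is an assumption for illustration (the commit view does not state it); `trust_remote_code=True` is needed because the mapping points at `modeling_fast_esmfold.py` shipped inside the repo rather than classes built into `transformers`.

```python
from transformers import AutoConfig, AutoModel

# "lhallee/FastEsmFold" is a placeholder repo id for illustration;
# substitute the actual Hub id of this repository.
repo_id = "lhallee/FastEsmFold"

# trust_remote_code=True lets AutoConfig/AutoModel resolve the custom
# FastEsmFoldConfig / FastEsmForProteinFolding classes via "auto_map".
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

print(type(model).__name__)  # expected: FastEsmForProteinFolding
print(config.hidden_size)    # 2560, matching the ESM2-3B-sized backbone above
```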
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36fab9e5c96d409b2a34a8b4f1273acac8c07f119c32c4fcfa7d47bbcd55b83c
+ size 4930778876
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34954aaa05bc91635776ba6672946da5822626753d80db97b38c0538e9525102
+ size 4930430536
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f1178cda0e6cff3b1e158e1acc59c83e3f4fc46e246388a5127bc56b8d9c4f2
+ size 4266065284
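Each shard above is committed as a Git LFS pointer (spec version, sha256 oid, byte size) rather than the binary itself. A sketch for checking a downloaded shard against its pointer; the local filename is assumed to match the repo path.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so a ~4.9 GB shard never loads fully into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

# oid from the first pointer file above
expected = "36fab9e5c96d409b2a34a8b4f1273acac8c07f119c32c4fcfa7d47bbcd55b83c"
assert sha256_of("model-00001-of-00003.safetensors") == expected
```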
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
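Although the index diff is not rendered, `model.safetensors.index.json` follows the standard sharded-safetensors layout: a `"metadata"` block holding the total byte size and a `"weight_map"` from tensor names to shard filenames. A sketch, assuming the file has been downloaded locally:

```python
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Standard layout:
#   index["metadata"]["total_size"] -> total bytes across all shards
#   index["weight_map"]             -> {tensor name: shard filename}
print(index["metadata"]["total_size"])
print(sorted(set(index["weight_map"].values())))
# expected: the three model-0000N-of-00003.safetensors shards above
```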