fondress committed on
Commit
1dc1510
·
verified ·
1 Parent(s): fe7c242

Upload 4 files

Browse files
Files changed (3) hide show
  1. config.json +21 -21
  2. configuration_pdeeppp.py +37 -0
  3. modeling_PDeepPP.py +1 -1
config.json CHANGED
@@ -1,21 +1,21 @@
1
- {
2
- "architectures": [
3
- "PDeepPPModel"
4
- ],
5
- "auto_map": {
6
- "AutoConfig": "configuration_PDeepPP.PDeepPPConfig",
7
- "AutoModel": "modeling_PDeepPP.PDeepPPModel"
8
- },
9
- "dropout": 0.3,
10
- "esm_ratio": 1.0,
11
- "hidden_size": 256,
12
- "input_size": 1280,
13
- "lambda_": 0.96,
14
- "model_type": "PDeepPP",
15
- "num_heads": 8,
16
- "num_transformer_layers": 4,
17
- "output_size": 128,
18
- "ptm_type": "ACE",
19
- "torch_dtype": "float32",
20
- "transformers_version": "4.35.2"
21
- }
 
1
+ {
2
+ "architectures": [
3
+ "PDeepPPModel"
4
+ ],
5
+ "auto_map": {
6
+ "AutoConfig": "configuration_pdeeppp.PDeepPPConfig",
7
+ "AutoModel": "modeling_PDeepPP.PDeepPPModel"
8
+ },
9
+ "dropout": 0.3,
10
+ "esm_ratio": 1.0,
11
+ "hidden_size": 256,
12
+ "input_size": 1280,
13
+ "lambda_": 0.96,
14
+ "model_type": "PDeepPP",
15
+ "num_heads": 8,
16
+ "num_transformer_layers": 4,
17
+ "output_size": 128,
18
+ "ptm_type": "ACE",
19
+ "torch_dtype": "float32",
20
+ "transformers_version": "4.35.2"
21
+ }
configuration_pdeeppp.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, List, Optional, Union
3
+
4
+ from transformers.configuration_utils import PretrainedConfig
5
+ from transformers.utils import logging
6
+
7
# Module-level logger namespaced to this file (standard transformers convention).
+ logger = logging.get_logger(__name__)
8
+
9
class PDeepPPConfig(PretrainedConfig):
    """Configuration class for the PDeepPP model.

    Holds the hyperparameters used to build a ``PDeepPPModel``: embedding
    and transformer dimensions, dropout, the PTM task identifier, and two
    mixing/weighting coefficients (``esm_ratio``, ``lambda_``).

    Args:
        input_size: Dimensionality of the input embeddings (default 1280;
            presumably the ESM encoder's hidden size — TODO confirm).
        output_size: Dimensionality of the model's output representation.
        num_heads: Number of attention heads per transformer layer.
        hidden_size: Hidden dimension of the transformer layers.
        num_transformer_layers: Number of stacked transformer layers.
        dropout: Dropout probability.
        ptm_type: Identifier of the post-translational-modification task
            (e.g. ``"ACE"``).
        esm_ratio: Weighting coefficient for the ESM branch — exact
            semantics defined in ``modeling_PDeepPP.py`` (not visible here).
        lambda_: Weighting/loss coefficient — exact semantics defined in
            ``modeling_PDeepPP.py`` (not visible here).
        **kwargs: Forwarded to :class:`~transformers.PretrainedConfig`.
    """

    model_type = "PDeepPP"

    def __init__(
        self,
        input_size=1280,
        output_size=128,
        num_heads=8,
        hidden_size=256,
        num_transformer_layers=4,
        dropout=0.3,
        ptm_type="ACE",
        # FIX: the repo's config.json ships "esm_ratio": 1.0 and
        # "lambda_": 0.96; the original defaults (esm_ratio=0.96,
        # lambda_=1) had these two values swapped. Align the code
        # defaults with the shipped configuration.
        esm_ratio=1.0,
        lambda_=0.96,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.input_size = input_size
        self.output_size = output_size
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.num_transformer_layers = num_transformer_layers
        self.dropout = dropout
        self.ptm_type = ptm_type
        self.esm_ratio = esm_ratio
        self.lambda_ = lambda_


# Register the class so AutoConfig can resolve it through the repo's
# auto_map entry ("AutoConfig": "configuration_pdeeppp.PDeepPPConfig").
PDeepPPConfig.register_for_auto_class()
modeling_PDeepPP.py CHANGED
@@ -5,7 +5,7 @@ from typing import Optional, Tuple, Union
5
  from transformers.modeling_utils import PreTrainedModel
6
  from transformers.utils import logging
7
 
8
- from configuration_PDeepPP import PDeepPPConfig
9
 
10
  logger = logging.get_logger(__name__)
11
 
 
5
  from transformers.modeling_utils import PreTrainedModel
6
  from transformers.utils import logging
7
 
8
+ from configuration_pdeeppp import PDeepPPConfig
9
 
10
  logger = logging.get_logger(__name__)
11