Mart78 committed (verified)
Commit 37bae1e · 1 Parent(s): 24d9580

Create configuration_molformer.py

Files changed (1):
  1. configuration_molformer.py +158 -0
configuration_molformer.py ADDED
@@ -0,0 +1,158 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Molformer model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.onnx import OnnxConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+MOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "ibm/MoLFormer-XL-both-10pct": "https://huggingface.co/ibm/MoLFormer-XL-both-10pct/resolve/main/config.json",
+}
+
+
+class MolformerConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MolformerModel`]. It is used to instantiate a
+    Molformer model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the Molformer
+    [ibm/MoLFormer-XL-both-10pct](https://huggingface.co/ibm/MoLFormer-XL-both-10pct) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 2362):
+            Vocabulary size of the Molformer model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`MolformerModel`] or [`TFMolformerModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimension of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 768):
+            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        embedding_dropout_prob (`float`, *optional*, defaults to 0.2):
+            The dropout probability for the word embeddings.
+        max_position_embeddings (`int`, *optional*, defaults to 202):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 1536).
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        linear_attention_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the linear attention layers' normalization step.
+        num_random_features (`int`, *optional*, defaults to 32):
+            Random feature map dimension used in linear attention.
+        feature_map_kernel (`str` or `function`, *optional*, defaults to `"relu"`):
+            The non-linear activation function (function or string) in the generalized random features. If string,
+            `"gelu"`, `"relu"`, `"selu"`, and `"gelu_new"` are supported.
+        deterministic_eval (`bool`, *optional*, defaults to `False`):
+            Whether the random features should only be redrawn when training or not. If `True` and `model.training` is
+            `False`, linear attention random feature weights will be constant, i.e., deterministic.
+        classifier_dropout_prob (`float`, *optional*):
+            The dropout probability for the classification head. If `None`, use `hidden_dropout_prob`.
+        classifier_skip_connection (`bool`, *optional*, defaults to `True`):
+            Whether a skip connection should be made between the layers of the classification head or not.
+        pad_token_id (`int`, *optional*, defaults to 2):
+            The id of the _padding_ token.
+
+    Example:
+
+    ```python
+    >>> from transformers import MolformerModel, MolformerConfig
+
+    >>> # Initializing a Molformer ibm/MoLFormer-XL-both-10pct style configuration
+    >>> configuration = MolformerConfig()
+
+    >>> # Initializing a model from the ibm/MoLFormer-XL-both-10pct style configuration
+    >>> model = MolformerModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+    model_type = "molformer"
+
+    def __init__(
+        self,
+        vocab_size=2362,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=768,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.1,
+        embedding_dropout_prob=0.2,
+        max_position_embeddings=202,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        linear_attention_eps=1e-6,
+        num_random_features=32,
+        feature_map_kernel="relu",
+        deterministic_eval=False,
+        classifier_dropout_prob=None,
+        classifier_skip_connection=True,
+        pad_token_id=2,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.embedding_dropout_prob = embedding_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.linear_attention_eps = linear_attention_eps
+        self.num_random_features = num_random_features
+        self.feature_map_kernel = feature_map_kernel
+        self.deterministic_eval = deterministic_eval
+        self.classifier_dropout_prob = classifier_dropout_prob
+        self.classifier_skip_connection = classifier_skip_connection
+
+
+# Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->Molformer
+class MolformerOnnxConfig(OnnxConfig):
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        if self.task == "multiple-choice":
+            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+        else:
+            dynamic_axis = {0: "batch", 1: "sequence"}
+        return OrderedDict(
+            [
+                ("input_ids", dynamic_axis),
+                ("attention_mask", dynamic_axis),
+            ]
+        )
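
A minimal usage sketch of the two classes added above, assuming `configuration_molformer.py` is importable from the working directory; the override values are illustrative, not taken from the ibm/MoLFormer-XL-both-10pct checkpoint.

```python
# Sketch only: exercises the configuration classes defined in this commit.
# Assumes configuration_molformer.py is on the Python path; the hyperparameter
# overrides below are illustrative, not checkpoint values.
from configuration_molformer import MolformerConfig, MolformerOnnxConfig

# Default arguments mirror the ibm/MoLFormer-XL-both-10pct architecture.
config = MolformerConfig()

# A smaller variant with deterministic linear-attention features at eval time.
small_config = MolformerConfig(
    num_hidden_layers=6,
    num_random_features=16,
    deterministic_eval=True,
)
print(small_config.num_hidden_layers, small_config.num_random_features)

# The ONNX config exposes the dynamic axes used when exporting the model.
onnx_config = MolformerOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
```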