JalalKhal committed on
Commit c3443ec · verified · 1 parent: 6278bb5

HF model and vLLM integration files

config.json ADDED
@@ -0,0 +1,77 @@
{
  "architectures": [
    "EmbedderModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_embedder.EmbedderConfig",
    "AutoModel": "modeling_embedder.EmbedderModel"
  },
  "base_model_name": "nomic-ai/nomic-embed-text-v1.5",
  "dropout": 0.0,
  "dtype": "float32",
  "encoder_config": {
    "_name_or_path": "nomic-ai/nomic-embed-text-v1.5",
    "activation_function": "swiglu",
    "architectures": [
      "NomicBertModel"
    ],
    "attn_pdrop": 0.0,
    "auto_map": {
      "AutoConfig": "nomic-ai/nomic-bert-2048--configuration_hf_nomic_bert.NomicBertConfig",
      "AutoModel": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertModel",
      "AutoModelForMaskedLM": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertForPreTraining",
      "AutoModelForMultipleChoice": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertForMultipleChoice",
      "AutoModelForQuestionAnswering": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertForQuestionAnswering",
      "AutoModelForSequenceClassification": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertForSequenceClassification",
      "AutoModelForTokenClassification": "nomic-ai/nomic-bert-2048--modeling_hf_nomic_bert.NomicBertForTokenClassification"
    },
    "bos_token_id": null,
    "causal": false,
    "dense_seq_output": true,
    "dtype": "float32",
    "embd_pdrop": 0.0,
    "eos_token_id": null,
    "fused_bias_fc": true,
    "fused_dropout_add_ln": true,
    "initializer_range": 0.02,
    "layer_norm_epsilon": 1e-12,
    "max_trained_positions": 2048,
    "mlp_fc1_bias": false,
    "mlp_fc2_bias": false,
    "model_type": "nomic_bert",
    "n_embd": 768,
    "n_head": 12,
    "n_inner": 3072,
    "n_layer": 12,
    "n_positions": 8192,
    "pad_vocab_size_multiple": 64,
    "parallel_block": false,
    "parallel_block_tied_norm": false,
    "prenorm": false,
    "qkv_proj_bias": false,
    "reorder_and_upcast_attn": false,
    "resid_pdrop": 0.0,
    "rotary_emb_base": 1000,
    "rotary_emb_fraction": 1.0,
    "rotary_emb_interleaved": false,
    "rotary_emb_scale_base": null,
    "rotary_scaling_factor": null,
    "scale_attn_by_inverse_layer_idx": false,
    "scale_attn_weights": true,
    "summary_activation": null,
    "summary_first_dropout": 0.0,
    "summary_proj_to_labels": true,
    "summary_type": "cls_index",
    "summary_use_proj": true,
    "type_vocab_size": 2,
    "use_cache": true,
    "use_flash_attn": true,
    "use_rms_norm": false,
    "use_xentropy": true,
    "vocab_size": 30528
  },
  "encoder_only": false,
  "model_type": "embedder",
  "num_blocks": 2,
  "transformers_version": "4.57.3"
}
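The auto_map above wires the custom classes into transformers' dynamic module loading, so the model can be pulled in with the standard Auto* API. A minimal loading sketch (the repo id is a placeholder; trust_remote_code is required because the config and model classes ship with the repository):

from transformers import AutoModel

# Hypothetical repo id; substitute the actual Hub path of this repository.
model = AutoModel.from_pretrained("JalalKhal/<this-repo>", trust_remote_code=True)
print(type(model).__name__)  # EmbedderModel, resolved via auto_map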
configuration_embedder.py ADDED
@@ -0,0 +1,37 @@
from typing import Any

from transformers import AutoConfig, PretrainedConfig


class EmbedderConfig(PretrainedConfig):
    model_type = "embedder"

    def __init__(
        self,
        base_model_name: str = "nomic-ai/nomic-embed-text-v1.5",
        num_blocks: int = 2,
        dropout: float = 0.0,
        encoder_only: bool = False,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.base_model_name = base_model_name
        self.num_blocks = num_blocks
        self.dropout = dropout
        self.encoder_only = encoder_only
        self.encoder_config = AutoConfig.from_pretrained(
            base_model_name,
            trust_remote_code=True,
        )
        self.auto_map = {
            "AutoConfig": "configuration_embedder.EmbedderConfig",
            "AutoModel": "modeling_embedder.EmbedderModel",
        }

    def __getattr__(self, name: str) -> Any:
        if name != "encoder_config" and hasattr(self.encoder_config, name):
            return getattr(self.encoder_config, name)
        raise AttributeError(name)


EmbedderConfig.register_for_auto_class()
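Because __getattr__ falls through to the wrapped encoder config, fields defined only on the nomic encoder (e.g. n_embd) are readable directly off an EmbedderConfig instance. A small sketch, assuming the nomic config can be fetched (instantiation calls AutoConfig.from_pretrained, so network access or a local cache is needed):

from configuration_embedder import EmbedderConfig

cfg = EmbedderConfig()
print(cfg.model_type)  # "embedder"
print(cfg.n_embd)      # 768, forwarded from cfg.encoder_config via __getattr__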
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e8b233a6180cf2e969f267c82df53de264a7d8433b723d666f0c5c9e85bd6a8
size 561127080
modeling_embedder.py ADDED
@@ -0,0 +1,78 @@
from typing import Any, Optional, cast

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel, PreTrainedModel
from transformers.modeling_outputs import BaseModelOutput

from .configuration_embedder import EmbedderConfig


class EncoderBlock(nn.Module):
    def __init__(self, dim: int, hidden_dim: int, dropout: float):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.ReLU(),
        )
        self.norm = nn.LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)
        self.proj = nn.Linear(hidden_dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        x = self.net(x)
        x = self.dropout(x)
        x = self.proj(x)
        return cast(torch.Tensor, self.norm(x + residual))


class Head(nn.Module):
    def __init__(self, dim: int, num_blocks: int = 1, dropout: float = 0.0):
        super().__init__()
        self.blocks = nn.Sequential(
            *[EncoderBlock(dim=dim, hidden_dim=dim, dropout=dropout) for _ in range(num_blocks)]
        )
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.blocks(x)
        x = self.proj(x)
        return x


class EmbedderModel(PreTrainedModel):
    config_class = EmbedderConfig  # type: ignore[assignment]
    base_model_prefix = "model"
    _supports_attention_backend = True

    def __init__(self, config: EmbedderConfig):
        super().__init__(config)
        self.encoder = AutoModel.from_config(
            config.encoder_config,
            trust_remote_code=True,
        )
        self._init_requires_grad(self.encoder)
        self.head = Head(
            dim=self.encoder.embeddings.word_embeddings.embedding_dim,
            num_blocks=config.num_blocks,
            dropout=config.dropout,
        )

    def _init_requires_grad(self, module: nn.Module) -> None:
        # The base encoder is frozen; only the head is trainable.
        for p in module.parameters():
            p.requires_grad = False

    def forward(
        self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Any
    ) -> BaseModelOutput:
        hidden_states = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        emb = hidden_states
        if not self.config.encoder_only:
            emb = self.head(hidden_states)  # (batch, seq_len, dim)
            emb = F.normalize(emb, dim=-1)
        return BaseModelOutput(last_hidden_state=emb)  # type: ignore[arg-type]


EmbedderModel.register_for_auto_class()
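For reference, a minimal HF-side inference sketch. The repo id is a placeholder, the tokenizer is taken from the base nomic model, and CLS pooling is an assumption carried over from the vLLM wrapper's @default_pooling_type("CLS") further below:

import torch
from transformers import AutoModel, AutoTokenizer

repo = "JalalKhal/<this-repo>"  # placeholder
tokenizer = AutoTokenizer.from_pretrained("nomic-ai/nomic-embed-text-v1.5")
model = AutoModel.from_pretrained(repo, trust_remote_code=True).eval()

batch = tokenizer(["hello world"], return_tensors="pt", padding=True)
with torch.no_grad():
    out = model(**batch)              # last_hidden_state is already L2-normalized per token
emb = out.last_hidden_state[:, 0]     # CLS token as sentence embedding (assumed pooling)
print(emb.shape)                      # torch.Size([1, 768])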
vllm_configuration_embedder.py ADDED
@@ -0,0 +1,92 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from vllm.config import VllmConfig

from vllm.model_executor.models.config import VerifyAndUpdateConfig


class EmbedderModelConfig(VerifyAndUpdateConfig):

    @staticmethod
    def verify_and_update_config(vllm_config: "VllmConfig") -> None:
        from copy import deepcopy

        from vllm.transformers_utils.config import set_default_rope_theta

        config = vllm_config.model_config.hf_config
        assert config.__class__.__name__ == "EmbedderConfig"
        assert config.activation_function in ["swiglu", "gelu"]
        config.position_embedding_type = getattr(config, "position_embedding_type", "rope")

        if config.activation_function == "swiglu":
            config.hidden_act = "silu"
        else:
            config.hidden_act = config.activation_function

        assert config.mlp_fc1_bias == config.mlp_fc2_bias == config.qkv_proj_bias
        config.bias = config.qkv_proj_bias

        assert config.rotary_emb_scale_base is None
        assert not config.rotary_emb_interleaved

        config.layer_norm_eps = config.layer_norm_epsilon
        config.intermediate_size = config.n_inner
        config.hidden_size = config.n_embd
        config.num_hidden_layers = config.n_layer

        head_dim = config.hidden_size // config.num_attention_heads
        rotary_emb_dim = int(head_dim * config.rotary_emb_fraction)
        max_trained_positions = getattr(config, "max_trained_positions", 2048)

        set_default_rope_theta(config, default_theta=config.rotary_emb_base)

        config.rotary_kwargs = {
            "head_size": head_dim,
            "rotary_dim": rotary_emb_dim,
            "max_position": max_trained_positions,
            "rope_parameters": config.rope_parameters,
        }

        # config.rotary_scaling_factor is ignored so that, for sequences shorter
        # than max_trained_positions (2048), results stay consistent with
        # SentenceTransformer. Context extension uses vLLM-style rope_theta and
        # rope_parameters instead. See #17785 and #18755.
        if not vllm_config.model_config.hf_overrides and vllm_config.model_config.original_max_model_len is None:
            # Default path: reset max_model_len to max_trained_positions.
            # For nomic-embed-text-v2-moe the length is capped at 512 by
            # sentence_bert_config.json.
            max_model_len = min(vllm_config.model_config.max_model_len, max_trained_positions)  # type: ignore[unreachable]

            vllm_config.recalculate_max_model_len(max_model_len)

        else:
            # Re-verify max_model_len to avoid lengths greater than the
            # position embedding.
            model_config = vllm_config.model_config
            hf_text_config = model_config.hf_text_config

            if isinstance(model_config.hf_overrides, dict):
                # hf_overrides_kw
                max_model_len = model_config.hf_overrides.get("max_model_len", vllm_config.model_config.max_model_len)
            else:
                # hf_overrides_fn; this might be overridden by
                # sentence_bert_config.json.
                max_model_len = vllm_config.model_config.max_model_len

            # Reset hf_text_config for recalculate_max_model_len.
            if hasattr(hf_text_config, "max_model_len"):
                delattr(hf_text_config, "max_model_len")
            hf_text_config.max_position_embeddings = max_trained_positions
            hf_text_config.rope_parameters = config.rotary_kwargs["rope_parameters"]

            # sentence_bert_config.json takes priority over
            # max_position_embeddings.
            encoder_config = deepcopy(model_config.encoder_config)
            if encoder_config:
                encoder_config.pop("max_seq_length", None)
                model_config.encoder_config = encoder_config

            vllm_config.recalculate_max_model_len(max_model_len)
vllm_modeling_embedder.py ADDED
@@ -0,0 +1,107 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Optional, cast

import torch
import torch.nn as nn
import torch.nn.functional as F
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
from vllm.model_executor.models.bert_with_rope import NomicBertModel
from vllm.model_executor.models.interfaces_base import default_pooling_type
from vllm.model_executor.models.utils import WeightsMapper


class EncoderBlock(nn.Module):
    def __init__(self, dim: int, hidden_dim: int, dropout: float):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.ReLU(),
        )
        self.norm = nn.LayerNorm(dim)
        self.dropout = nn.Dropout(dropout)
        self.proj = nn.Linear(hidden_dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        x = self.net(x)
        x = self.dropout(x)
        x = self.proj(x)
        return cast(torch.Tensor, self.norm(x + residual))


class Head(nn.Module):
    def __init__(self, dim: int, num_blocks: int = 1, dropout: float = 0.0):
        super().__init__()
        self.blocks = nn.Sequential(
            *[EncoderBlock(dim=dim, hidden_dim=dim, dropout=dropout) for _ in range(num_blocks)]
        )
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.blocks(x)
        x = self.proj(x)
        return x


@support_torch_compile
@default_pooling_type("CLS")
class EmbedderModel(nn.Module):
    """
    vLLM wrapper for the HF-trained EmbedderModel
    (encoder + custom graph head).
    """

    # HF state_dict keys start with "model."
    hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""})

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
    ):
        super().__init__()
        self.hf_config = vllm_config.model_config.hf_config
        # --------------------------------------------------
        # Base encoder (identical to training)
        # --------------------------------------------------
        self.encoder = NomicBertModel(
            vllm_config=vllm_config,
            prefix=f"{prefix}.encoder",
            add_pooling_layer=False,
        )
        # --------------------------------------------------
        # Custom head (must match HF exactly)
        # --------------------------------------------------
        self.head = Head(
            dim=self.hf_config.hidden_size,
            num_blocks=self.hf_config.num_blocks,
            dropout=self.hf_config.dropout,
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.encoder.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # vLLM manages attention & KV internally
        hidden_states = self.encoder(
            input_ids=input_ids,
            positions=positions,
            inputs_embeds=inputs_embeds,
            token_type_ids=token_type_ids,
        )
        emb = hidden_states
        if not self.hf_config.encoder_only:
            # Head + normalize (same as HF)
            emb = self.head(hidden_states)
            emb = F.normalize(emb, dim=-1)
        return emb
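Finally, a hedged sketch of how the vLLM files might be exercised as an out-of-tree model. ModelRegistry.register_model and LLM.embed are standard vLLM APIs, but the exact task/runner flag varies across vLLM versions, and this commit does not show how EmbedderModelConfig is hooked into vLLM's config-verification map:

from vllm import LLM, ModelRegistry

# Lazy registration by "module:Class" path; assumes vllm_modeling_embedder.py
# is importable (e.g. on PYTHONPATH or installed as a plugin).
ModelRegistry.register_model("EmbedderModel", "vllm_modeling_embedder:EmbedderModel")

# task="embed" on recent vLLM releases; other versions may spell this differently.
llm = LLM(model="JalalKhal/<this-repo>", task="embed", trust_remote_code=True)
outputs = llm.embed(["hello world"])
print(len(outputs[0].outputs.embedding))  # embedding dimensionality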