jiaruic committed on
Commit
a7b155d
·
verified ·
1 Parent(s): 9e99684

Update model name

Browse files
mteb_llama_embed_nemotron_reasoning_3b.py → mteb_llama_nv_embed_reasoning_3b.py RENAMED
@@ -1,7 +1,7 @@
1
  # SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
  # SPDX-License-Identifier: Apache-2.0.
3
  """
4
- MTEB encoder and ModelMeta for nvidia/llama-embed-nemotron-reasoning-3b.
5
  """
6
 
7
  from mteb.models.model_meta import ModelMeta
@@ -30,8 +30,8 @@ BRIGHT_TASK_INSTRUCTIONS = {
30
 
31
  BRIGHT_PASSAGE_PREFIX = "passage: "
32
 
33
- class LlamaEmbedNemotronReasoning(LlamaEmbedNemotron):
34
- """LlamaEmbedNemotron for reasoning with BRIGHT benchmark prompts."""
35
 
36
  def __init__(self, model_name: str, revision: str, device: str | None = None, **kwargs) -> None:
37
  super().__init__(model_name, revision=revision, device=device)
@@ -63,10 +63,10 @@ class LlamaEmbedNemotronReasoning(LlamaEmbedNemotron):
63
  prefix = self.format_instruction(instruction, prompt_type)
64
  return self._extract_embeddings(inputs, instruction=prefix, **kwargs)
65
 
66
- LLAMA_EMBED_NEMOTRON_REASONING_3B_META = ModelMeta(
67
- loader=LlamaEmbedNemotronReasoning,
68
  loader_kwargs=dict(max_seq_length=8192),
69
- name="nvidia/llama-embed-nemotron-reasoning-3b",
70
  model_type=["dense"],
71
  languages=llama_embed_nemotron_evaluated_languages,
72
  open_weights=True,
@@ -75,9 +75,9 @@ LLAMA_EMBED_NEMOTRON_REASONING_3B_META = ModelMeta(
75
  n_parameters=3_212_749_824,
76
  memory_usage_mb=6000,
77
  embed_dim=3072,
78
- license="https://huggingface.co/nvidia/llama-embed-nemotron-reasoning-3b/blob/main/LICENSE",
79
  max_tokens=8192,
80
- reference="https://huggingface.co/nvidia/llama-embed-nemotron-reasoning-3b",
81
  similarity_fn_name="cosine",
82
  framework=["PyTorch", "Transformers"],
83
  use_instructions=True,
 
1
  # SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
  # SPDX-License-Identifier: Apache-2.0.
3
  """
4
+ MTEB encoder and ModelMeta for nvidia/llama-nv-embed-reasoning-3b.
5
  """
6
 
7
  from mteb.models.model_meta import ModelMeta
 
30
 
31
  BRIGHT_PASSAGE_PREFIX = "passage: "
32
 
33
+ class LlamaNvEmbedReasoning(LlamaEmbedNemotron):
34
+ """LlamaNvEmbedReasoning for reasoning with BRIGHT benchmark prompts."""
35
 
36
  def __init__(self, model_name: str, revision: str, device: str | None = None, **kwargs) -> None:
37
  super().__init__(model_name, revision=revision, device=device)
 
63
  prefix = self.format_instruction(instruction, prompt_type)
64
  return self._extract_embeddings(inputs, instruction=prefix, **kwargs)
65
 
66
+ LLAMA_NV_EMBED_REASONING_3B_META = ModelMeta(
67
+ loader=LlamaNvEmbedReasoning,
68
  loader_kwargs=dict(max_seq_length=8192),
69
+ name="nvidia/llama-nv-embed-reasoning-3b",
70
  model_type=["dense"],
71
  languages=llama_embed_nemotron_evaluated_languages,
72
  open_weights=True,
 
75
  n_parameters=3_212_749_824,
76
  memory_usage_mb=6000,
77
  embed_dim=3072,
78
+ license="https://huggingface.co/nvidia/llama-nv-embed-reasoning-3b/blob/main/LICENSE",
79
  max_tokens=8192,
80
+ reference="https://huggingface.co/nvidia/llama-nv-embed-reasoning-3b",
81
  similarity_fn_name="cosine",
82
  framework=["PyTorch", "Transformers"],
83
  use_instructions=True,