Instructions to use Synthyra/ESMplusplus_large with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use Synthyra/ESMplusplus_large with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("fill-mask", model="Synthyra/ESMplusplus_large", trust_remote_code=True)

# Load model directly
from transformers import AutoModelForMaskedLM
model = AutoModelForMaskedLM.from_pretrained("Synthyra/ESMplusplus_large", trust_remote_code=True, dtype="auto")
```
- Notebooks
- Google Colab
- Kaggle
Upload modeling_esm_plusplus.py with huggingface_hub
Browse files — modeling_esm_plusplus.py (+5 −0)
modeling_esm_plusplus.py
CHANGED
```diff
@@ -377,6 +377,11 @@ class Pooler:
 377         attention_mask: Optional[torch.Tensor] = None,
 378         attentions: Optional[torch.Tensor] = None
 379     ) -> torch.Tensor:
 380 +       if attention_mask is not None:
 381 +           assert attention_mask.sum(dim=-1).min() > 0, (
 382 +               "Pooler received samples with all-zero attention masks. "
 383 +               "This causes NaN from division by zero. Filter empty inputs before pooling."
 384 +           )
 385         final_emb: List[torch.Tensor] = []
 386         for pooling_type in self.pooling_types:
 387             final_emb.append(self.pooling_options[pooling_type](emb=emb, attention_mask=attention_mask, attentions=attentions))
```