Instructions to use xiaotinghe/buffer-embedding-002 with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use xiaotinghe/buffer-embedding-002 with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("feature-extraction", model="xiaotinghe/buffer-embedding-002", trust_remote_code=True)

# Load model directly
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("xiaotinghe/buffer-embedding-002", trust_remote_code=True)
model = AutoModel.from_pretrained("xiaotinghe/buffer-embedding-002", trust_remote_code=True)

- Notebooks
- Google Colab
- Kaggle
Commit ·
c69924f
1
Parent(s): 0fdbcd5
Upload model
Browse files
- embedding_model.py (+0, -2)
embedding_model.py
CHANGED
|
@@ -2,11 +2,9 @@ import torch
|
|
| 2 |
import torch.nn.functional as F
|
| 3 |
from torch import nn
|
| 4 |
from transformers import BloomForCausalLM, PreTrainedModel
|
| 5 |
-
from .configuration import BufferEmbeddingConfig
|
| 6 |
|
| 7 |
|
| 8 |
class DualModel(PreTrainedModel):
|
| 9 |
-
config_class = BufferEmbeddingConfig
|
| 10 |
_auto_class = "AutoModel"
|
| 11 |
def __init__(self, config):
|
| 12 |
super(DualModel, self).__init__(config)
|
|
|
|
| 2 |
import torch.nn.functional as F
|
| 3 |
from torch import nn
|
| 4 |
from transformers import BloomForCausalLM, PreTrainedModel
|
|
|
|
| 5 |
|
| 6 |
|
| 7 |
class DualModel(PreTrainedModel):
|
|
|
|
| 8 |
_auto_class = "AutoModel"
|
| 9 |
def __init__(self, config):
|
| 10 |
super(DualModel, self).__init__(config)
|