---
language:
- ru
base_model:
- cointegrated/rubert-tiny2
pipeline_tag: text-classification
size:
- 114 MB
---
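
The code below loads an ONNX export of cointegrated/rubert-tiny2 that has been graph-optimized with `optimum`. The card itself does not show the export step; a minimal sketch of how a file like `model_optimized.onnx` can be produced with `optimum`'s `ORTOptimizer` (the save directory name here is an assumption, not taken from this card) could look like this:

<pre><code class="python">
# Hypothetical export/optimization step; the save directory name is an
# assumption. ORTOptimizer writes the optimized graph as model_optimized.onnx.
from optimum.onnxruntime import ORTModelForFeatureExtraction, ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig

# Export the base model to ONNX.
model = ORTModelForFeatureExtraction.from_pretrained(
    "cointegrated/rubert-tiny2", export=True
)

# Apply ONNX Runtime graph optimizations (level 2 = extended optimizations).
optimizer = ORTOptimizer.from_pretrained(model)
optimizer.optimize(
    save_dir="bert-onnx-optim",
    optimization_config=OptimizationConfig(optimization_level=2),
)
</code></pre>

The inference code then loads the optimized file and exposes a batched `encode` helper: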

<pre><code class="python">
import logging

import numpy as np
import torch
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer

log = logging.getLogger(__name__)

model_path = ".../bert-onnx-optim/"
file_name = "model_optimized.onnx"

encoder = ORTModelForFeatureExtraction.from_pretrained(
    model_path,
    file_name=file_name,
)
tokenizer = AutoTokenizer.from_pretrained(model_path)


def encode(texts, batch_size=8, normalize=True):
    """
    Compute embeddings for one or more texts.

    :param texts: a single string or a list of strings
    :param batch_size: number of texts encoded per forward pass
    :param normalize: whether to L2-normalize the embeddings
    :return: numpy array of shape (len(texts), hidden_size)
    """
    log.debug(f"Data: {texts}")
    if isinstance(texts, str):
        texts = [texts]
    all_embeddings = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        inputs = tokenizer(batch, return_tensors="pt", padding=True, truncation=True)

        with torch.no_grad():
            outputs = encoder(**inputs)
            # Mean-pool over the token dimension (padding tokens included).
            embeddings = outputs.last_hidden_state.mean(dim=1)

        if normalize:
            embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
        all_embeddings.append(embeddings.cpu().numpy())
    return np.vstack(all_embeddings)
</code></pre>
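
A short usage sketch (the example sentences are illustrative only):

<pre><code class="python">
# Encode a few sentences and compare them by cosine similarity;
# with normalize=True the dot product equals cosine similarity.
sentences = ["привет мир", "hello world", "как дела?"]
embeddings = encode(sentences)            # shape: (3, hidden_size)
similarity = embeddings @ embeddings.T    # pairwise cosine similarities
print(similarity)
</code></pre>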