#!/usr/bin/env python3
"""Production Sentence Embedding Model API"""

import json
import os
import re
import sys
import time
from typing import Dict, List, Tuple, Union

import numpy as np
import torch

class SentenceEmbeddingInference:
    def __init__(self, model_dir: str):
        self.model_dir = model_dir
        self.model = None
        self.vocab = None
        self.id_to_token = None
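        # Matches word tokens or single punctuation marks (a basic tokenizer).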
        self.word_pattern = re.compile(r'\b\w+\b|[.,!?;]')
        self.load_models()
    
    def load_models(self):
        print("πŸ”„ Loading sentence embedding model...")
        
        try:
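            # The TorchScript export is required; bail out early if it is missing.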
            torchscript_path = os.path.join(self.model_dir, "exports", "model_torchscript.pt")
            if os.path.exists(torchscript_path):
                self.model = torch.jit.load(torchscript_path, map_location='cpu')
                print("βœ… Loaded TorchScript model")
            else:
                print("⚠️ TorchScript model not found")
                return False
            
            vocab_path = os.path.join(self.model_dir, "tokenizer", "vocab.json")
            if os.path.exists(vocab_path):
                with open(vocab_path, 'r', encoding='utf-8') as f:
                    self.vocab = json.load(f)
                print(f"βœ… Loaded vocabulary with {len(self.vocab)} tokens")
            else:
                print("⚠️ Vocabulary file not found")
                return False
            
            id_to_token_path = os.path.join(self.model_dir, "tokenizer", "id_to_token.json")
            if os.path.exists(id_to_token_path):
                with open(id_to_token_path, 'r', encoding='utf-8') as f:
                    id_to_token_str = json.load(f)
                    self.id_to_token = {int(k): v for k, v in id_to_token_str.items()}
            else:
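                # No explicit id->token map on disk; invert the vocab instead.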
                self.id_to_token = {v: k for k, v in self.vocab.items()}
            
            self.model.eval()
            print("βœ… Model ready for inference")
            return True
            
        except Exception as e:
            print(f"❌ Failed to load model: {e}")
            return False
    
    def encode_text(self, text: str) -> List[int]:
        """Tokenize text into vocabulary ids, wrapped in [CLS] ... [SEP]."""
        if not text or not self.vocab:
            return []
        
        tokens = []
        words = self.word_pattern.findall(text.lower())
        
        for word in words:
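            # Greedy lookup: the word with a BPE-style end-of-word marker first,
            # then the bare word, then a per-character fallback, then [UNK].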
            word_boundary = word + "</w>"
            if word_boundary in self.vocab:
                tokens.append(self.vocab[word_boundary])
            elif word in self.vocab:
                tokens.append(self.vocab[word])
            else:
                for char in word:
                    if char in self.vocab:
                        tokens.append(self.vocab[char])
                    else:
                        tokens.append(self.vocab.get("[UNK]", 1))
        
        cls_token = self.vocab.get("[CLS]", 2)
        sep_token = self.vocab.get("[SEP]", 3)
        
        return [cls_token] + tokens + [sep_token]
    
    def get_embeddings(self, texts: Union[str, List[str]], batch_size: int = 8) -> np.ndarray:
        """Embed one or more texts; returns an array of shape (n_texts, dim)."""
        if isinstance(texts, str):
            texts = [texts]

        if not self.model:
            raise RuntimeError("Model not loaded.")
        
        embeddings = []
        
        for i in range(0, len(texts), batch_size):
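            # Texts are chunked by batch_size but still encoded one at a time;
            # a single batched forward pass would require the traced model to
            # accept a batch dimension greater than 1.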
            batch_texts = texts[i:i + batch_size]
            batch_embeddings = []
            
            for text in batch_texts:
                tokens = self.encode_text(text)
                if len(tokens) > 128:
                    # Truncate to the 128-token window, keeping the final [SEP].
                    tokens = tokens[:127] + tokens[-1:]

                attention_mask = [1] * len(tokens) + [0] * (128 - len(tokens))
                tokens = tokens + [0] * (128 - len(tokens))
                
                input_ids = torch.tensor([tokens], dtype=torch.long)
                attention_mask_tensor = torch.tensor([attention_mask], dtype=torch.float)
                
                with torch.no_grad():
                    embedding = self.model(input_ids, attention_mask_tensor)
                    batch_embeddings.append(embedding.squeeze(0).cpu().numpy())
            
            embeddings.extend(batch_embeddings)
        
        return np.array(embeddings)
    
    def compute_similarity(self, text1: str, text2: str) -> float:
        """Cosine similarity between two texts' embeddings, clipped to [-1, 1]."""
        embeddings = self.get_embeddings([text1, text2])

        # Normalize to unit vectors (the epsilon guards against zero norms);
        # the dot product of unit vectors is the cosine similarity.
        emb1 = embeddings[0] / (np.linalg.norm(embeddings[0]) + 1e-8)
        emb2 = embeddings[1] / (np.linalg.norm(embeddings[1]) + 1e-8)
        
        similarity = np.dot(emb1, emb2)
        return float(np.clip(similarity, -1.0, 1.0))
    
    def find_similar_texts(self, query: str, candidates: List[str], top_k: int = 5) -> List[Tuple[str, float]]:
        """Return the top_k candidates ranked by cosine similarity to the query."""
        if not candidates:
            return []
        
        query_embedding = self.get_embeddings([query])[0]
        query_norm = query_embedding / (np.linalg.norm(query_embedding) + 1e-8)
        
        candidate_embeddings = self.get_embeddings(candidates)
        
        similarities = []
        for i, candidate_emb in enumerate(candidate_embeddings):
            candidate_norm = candidate_emb / (np.linalg.norm(candidate_emb) + 1e-8)
            similarity = np.dot(query_norm, candidate_norm)
            similarities.append((candidates[i], float(similarity)))
        
        similarities.sort(key=lambda x: x[1], reverse=True)
        return similarities[:top_k]
    
    def benchmark_performance(self, num_texts: int = 100) -> Dict[str, float]:
        """Measure wall-clock embedding throughput on synthetic sentences."""
        print(f"πŸš€ Benchmarking performance with {num_texts} texts...")
        
        test_texts = [f"This is test sentence number {i} for benchmarking performance." for i in range(num_texts)]
        
        start_time = time.time()
        embeddings = self.get_embeddings(test_texts)
        end_time = time.time()
        
        total_time = end_time - start_time
        texts_per_second = num_texts / total_time
        avg_time_per_text = total_time / num_texts * 1000
        
        embedding_memory_mb = embeddings.nbytes / (1024 * 1024)
        
        results = {
            'texts_per_second': texts_per_second,
            'avg_time_per_text_ms': avg_time_per_text,
            'total_time_seconds': total_time,
            'embedding_memory_mb': embedding_memory_mb,
            'embedding_dimensions': embeddings.shape[1]
        }
        
        print(f"πŸ“Š Benchmark Results:")
        print(f"   Texts per second: {texts_per_second:.1f}")
        print(f"   Average time per text: {avg_time_per_text:.2f}ms")
        print(f"   Embedding dimensions: {embeddings.shape[1]}")
        print(f"   Memory usage: {embedding_memory_mb:.2f}MB")
        
        return results

if __name__ == "__main__":
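    # Assumes the exports/ and tokenizer/ directories live under the current directory.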
    model = SentenceEmbeddingInference("./")
    
    if model.model is None:
        print("❌ Failed to load model. Exiting.")
        sys.exit(1)
    
    test_sentences = [
        "The cat sat on the mat.",
        "A feline rested on the rug.",
        "Dogs are loyal companions.",
        "Programming requires logical thinking.",
        "Machine learning transforms data into insights.",
        "Natural language processing helps computers understand text."
    ]
    
    print("\nπŸ§ͺ Testing sentence embeddings...")
    
    embeddings = model.get_embeddings(test_sentences)
    print(f"Generated embeddings shape: {embeddings.shape}")
    
    similarity = model.compute_similarity(test_sentences[0], test_sentences[1])
    print(f"\nSimilarity between:")
    print(f"  '{test_sentences[0]}'")
    print(f"  '{test_sentences[1]}'")
    print(f"  Similarity: {similarity:.4f}")
    
    query = "What are cats like?"
    similar_texts = model.find_similar_texts(query, test_sentences, top_k=3)
    print(f"\nMost similar to '{query}':")
    for text, score in similar_texts:
        print(f"  {score:.4f}: {text}")
    
    print("\n" + "="*50)
    benchmark_results = model.benchmark_performance(50)
    
    print("\nβœ… Model testing completed successfully!")