"""
Embedding utilities - Generate vector embeddings using SentenceTransformers
Supports Qwen3 Embedding models through SentenceTransformers interface
"""
import os
from typing import Any, Dict, List, Optional, Union

import numpy as np

import config
class EmbeddingModel:
    """
    Embedding model using SentenceTransformers (supports Qwen3 and other models).

    Qwen3 aliases ("qwen3-0.6b", "qwen3-4b", "qwen3-8b") are mapped to their
    Hugging Face model ids and loaded through the SentenceTransformers
    interface; any other name is loaded as a standard SentenceTransformer
    model. If a Qwen3 load fails entirely, the instance falls back to
    all-MiniLM-L6-v2 so callers always get a working encoder.
    """

    # Short alias -> Hugging Face model id for the supported Qwen3 checkpoints.
    _QWEN3_MODELS = {
        "qwen3-0.6b": "Qwen/Qwen3-Embedding-0.6B",
        "qwen3-4b": "Qwen/Qwen3-Embedding-4B",
        "qwen3-8b": "Qwen/Qwen3-Embedding-8B",
    }

    def __init__(self, model_name: Optional[str] = None, use_optimization: bool = True):
        """
        Load the embedding model.

        Args:
            model_name: Model name or Qwen3 alias; falls back to
                config.EMBEDDING_MODEL when None/empty.
            use_optimization: For Qwen3, attempt flash_attention_2 with left
                padding before falling back to standard loading.
        """
        self.model_name = model_name or config.EMBEDDING_MODEL
        self.use_optimization = use_optimization
        print(f"Loading embedding model: {self.model_name}")
        # Route Qwen3 names through the Qwen3 loader; match case-insensitively
        # so e.g. "Qwen3-0.6B" is recognized as well as "qwen3-0.6b".
        if self.model_name.lower().startswith("qwen3"):
            self._init_qwen3_sentence_transformer()
        else:
            self._init_standard_sentence_transformer()

    def _init_qwen3_sentence_transformer(self):
        """Initialize a Qwen3 embedding model via SentenceTransformers.

        Sets self.model, self.dimension, self.model_type and
        self.supports_query_prompt; on any failure falls back to the default
        SentenceTransformer model instead of raising.
        """
        try:
            from sentence_transformers import SentenceTransformer
            # Resolve short aliases to real model ids (case-insensitive);
            # unknown names are passed through to SentenceTransformers as-is.
            model_path = self._QWEN3_MODELS.get(self.model_name.lower(), self.model_name)
            print(f"Loading Qwen3 model via SentenceTransformers: {model_path}")
            if self.use_optimization:
                try:
                    # flash_attention_2 + left padding is the recommended
                    # setup for Qwen3 embedding inference; it requires a
                    # compatible GPU, hence the fallback below.
                    self.model = SentenceTransformer(
                        model_path,
                        model_kwargs={
                            "attn_implementation": "flash_attention_2",
                            "device_map": "auto"
                        },
                        tokenizer_kwargs={"padding_side": "left"},
                        trust_remote_code=True
                    )
                    print("Qwen3 loaded with flash_attention_2 optimization")
                except Exception as e:
                    print(f"Flash attention failed ({e}), using standard loading...")
                    self.model = SentenceTransformer(model_path, trust_remote_code=True)
            else:
                self.model = SentenceTransformer(model_path, trust_remote_code=True)
            self.dimension = self.model.get_sentence_embedding_dimension()
            self.model_type = "qwen3_sentence_transformer"
            # Qwen3 exposes a dedicated "query" prompt for asymmetric
            # retrieval; getattr's default covers models without `prompts`.
            self.supports_query_prompt = 'query' in getattr(self.model, 'prompts', {})
            print(f"Qwen3 model loaded successfully with dimension: {self.dimension}")
            if self.supports_query_prompt:
                print("Query prompt support detected")
        except Exception as e:
            print(f"Failed to load Qwen3 model: {e}")
            print("Falling back to default SentenceTransformers model...")
            self._fallback_to_sentence_transformer()

    def _init_standard_sentence_transformer(self):
        """Initialize a standard SentenceTransformer model.

        Raises:
            Exception: re-raises whatever SentenceTransformer raised, since
                there is no further fallback at this point.
        """
        try:
            from sentence_transformers import SentenceTransformer
            self.model = SentenceTransformer(self.model_name)
            self.dimension = self.model.get_sentence_embedding_dimension()
            self.model_type = "sentence_transformer"
            self.supports_query_prompt = False
            print(f"SentenceTransformer model loaded with dimension: {self.dimension}")
        except Exception as e:
            print(f"Failed to load SentenceTransformer model: {e}")
            raise

    def _fallback_to_sentence_transformer(self):
        """Fall back to the default all-MiniLM-L6-v2 SentenceTransformer."""
        fallback_model = "sentence-transformers/all-MiniLM-L6-v2"
        print(f"Using fallback model: {fallback_model}")
        self.model_name = fallback_model
        self._init_standard_sentence_transformer()

    def encode(self, texts: Union[List[str], str], is_query: bool = False) -> np.ndarray:
        """
        Encode text(s) into normalized embedding vectors.

        Args:
            texts: List of texts to encode; a lone string is wrapped into a
                one-element list.
            is_query: Whether these are query texts (enables Qwen3's query
                prompt when the loaded model supports it).

        Returns:
            Array of shape (len(texts), dimension).
        """
        if isinstance(texts, str):
            texts = [texts]
        # Queries benefit from Qwen3's dedicated "query" prompt when present;
        # documents and non-Qwen3 models use the standard path.
        if self.model_type == "qwen3_sentence_transformer" and self.supports_query_prompt and is_query:
            return self._encode_with_query_prompt(texts)
        return self._encode_standard(texts)

    def encode_single(self, text: str, is_query: bool = False) -> np.ndarray:
        """
        Encode a single text.

        Args:
            text: Text to encode.
            is_query: Whether this is a query text (for Qwen3 prompt
                optimization).

        Returns:
            1-D embedding vector of length `dimension`.
        """
        return self.encode([text], is_query=is_query)[0]

    def encode_query(self, queries: List[str]) -> np.ndarray:
        """Encode queries with optimal settings for Qwen3 (query prompt on)."""
        return self.encode(queries, is_query=True)

    def encode_documents(self, documents: List[str]) -> np.ndarray:
        """Encode documents (no query prompt)."""
        return self.encode(documents, is_query=False)

    def _encode_with_query_prompt(self, texts: List[str]) -> np.ndarray:
        """Encode texts using Qwen3's "query" prompt, falling back to the
        standard path if prompt-based encoding fails."""
        try:
            embeddings = self.model.encode(
                texts,
                prompt_name="query",  # Use Qwen3's query prompt
                show_progress_bar=False,
                normalize_embeddings=True
            )
            return embeddings
        except Exception as e:
            print(f"Query prompt encoding failed: {e}, falling back to standard encoding")
            return self._encode_standard(texts)

    def _encode_standard(self, texts: List[str]) -> np.ndarray:
        """Encode texts with L2-normalized embeddings and no prompt."""
        embeddings = self.model.encode(
            texts,
            show_progress_bar=False,
            normalize_embeddings=True
        )
        return embeddings
|