addgbf committed on
Commit
6ed27b2
·
verified ·
1 Parent(s): 530c4d7

Update server1.py

Browse files
Files changed (1) hide show
  1. server1.py +5 -13
server1.py CHANGED
@@ -151,22 +151,13 @@ def _topk_cosine(text_feats: torch.Tensor, text_labels: List[str], img_feat: tor
151
  return [{"label": text_labels[int(i)], "confidence": round(float(c)*100.0, 2)} for i, c in zip(idxs, conf)]
152
 
153
  def process_image_bytes(front_bytes: bytes, back_bytes: Optional[bytes] = None):
 
154
  if not front_bytes or len(front_bytes) < 128:
155
  raise UnidentifiedImageError("imagen invalida")
156
 
 
157
  img_front = Image.open(io.BytesIO(front_bytes))
158
- feat_front = _encode_pil(img_front)
159
-
160
- if back_bytes:
161
- try:
162
- img_back = Image.open(io.BytesIO(back_bytes))
163
- feat_back = _encode_pil(img_back)
164
- img_feat = (feat_front + feat_back) / 2
165
- img_feat = img_feat / img_feat.norm(dim=-1, keepdim=True)
166
- except Exception:
167
- img_feat = feat_front
168
- else:
169
- img_feat = feat_front
170
 
171
  # paso 1: modelo
172
  top_model = _topk_cosine(model_embeddings, model_labels, img_feat, k=1)[0]
@@ -176,7 +167,7 @@ def process_image_bytes(front_bytes: bytes, back_bytes: Optional[bytes] = None):
176
  marca = partes[0] if len(partes) >= 1 else ""
177
  modelo = partes[1] if len(partes) == 2 else ""
178
 
179
- # paso 2: versiones con cache (misma logica, sin bucle global cada vez)
180
  labels_sub, embeds_sub = _get_versions_subset(modelo_full)
181
  if not labels_sub:
182
  return {"brand": marca.upper(), "model": modelo.title(), "version": ""}
@@ -192,6 +183,7 @@ def process_image_bytes(front_bytes: bytes, back_bytes: Optional[bytes] = None):
192
 
193
  return {"brand": marca.upper(), "model": modelo.title(), "version": ver.title() if ver else ""}
194
 
 
195
  # ===== endpoints =====
196
  @app.get("/")
197
  def root():
 
151
  return [{"label": text_labels[int(i)], "confidence": round(float(c)*100.0, 2)} for i, c in zip(idxs, conf)]
152
 
153
  def process_image_bytes(front_bytes: bytes, back_bytes: Optional[bytes] = None):
154
+ # back se admite pero se ignora por completo
155
  if not front_bytes or len(front_bytes) < 128:
156
  raise UnidentifiedImageError("imagen invalida")
157
 
158
+ # solo frontal
159
  img_front = Image.open(io.BytesIO(front_bytes))
160
+ img_feat = _encode_pil(img_front)
 
 
 
 
 
 
 
 
 
 
 
161
 
162
  # paso 1: modelo
163
  top_model = _topk_cosine(model_embeddings, model_labels, img_feat, k=1)[0]
 
167
  marca = partes[0] if len(partes) >= 1 else ""
168
  modelo = partes[1] if len(partes) == 2 else ""
169
 
170
+ # paso 2: versiones con cache
171
  labels_sub, embeds_sub = _get_versions_subset(modelo_full)
172
  if not labels_sub:
173
  return {"brand": marca.upper(), "model": modelo.title(), "version": ""}
 
183
 
184
  return {"brand": marca.upper(), "model": modelo.title(), "version": ver.title() if ver else ""}
185
 
186
+
187
  # ===== endpoints =====
188
  @app.get("/")
189
  def root():