import os
import datetime

from dotenv import load_dotenv

# Load environment variables from .env
load_dotenv()
API_TOKEN = os.getenv('API_TOKEN')

# Configure the Hugging Face cache directory. This must be set before
# transformers is imported, since the library resolves its cache path at
# import time.
os.environ['TRANSFORMERS_CACHE'] = '/code/cache'

import torch
from fastapi import FastAPI, Request, HTTPException, Header
from transformers import AutoTokenizer, AutoModel

app = FastAPI()

# Load the tokenizer and model once at startup; .eval() disables dropout and
# other training-only behavior for inference
print('🔄 Loading the e5-large-v2 model from Hugging Face...')
tokenizer = AutoTokenizer.from_pretrained("intfloat/e5-large-v2")
model = AutoModel.from_pretrained("intfloat/e5-large-v2").eval()

@app.get("/")
def read_root():
    return {"message": "API ativa 🙌"}

@app.post("/embed")
async def embed_text(request: Request, authorization: str = Header(None)):
    print(f'{datetime.datetime.now()} - Request received for /embed')

    # Simple bearer-token check against the token loaded from .env
    if authorization != f'Bearer {API_TOKEN}':
        raise HTTPException(status_code=401, detail="Unauthorized")

    data = await request.json()
    texto = data.get('texto')
    if not texto:
        raise HTTPException(status_code=400, detail="Field 'texto' is required")

    # e5 expects the 'query: ' prefix on query texts
    # (use 'passage: ' instead when embedding documents to be searched)
    texto = 'query: ' + texto.strip()
    # texto = 'passage: ' + texto.strip()
    print(f'{datetime.datetime.now()} - 🔍 Text received for embedding: {texto}')

    inputs = tokenizer(texto, return_tensors='pt', truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
        # Mean pooling over token embeddings: zero out padding positions via
        # the attention mask, then average over the sequence dimension
        embeddings = outputs.last_hidden_state
        mask = inputs['attention_mask'].unsqueeze(-1).expand(embeddings.size()).float()
        masked_embeddings = embeddings * mask
        summed = torch.sum(masked_embeddings, dim=1)
        counted = torch.clamp(mask.sum(1), min=1e-9)
        mean_pooled = (summed / counted).squeeze().tolist()
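    # Note: the e5 model card L2-normalizes embeddings before computing cosine
    # similarity (e.g. torch.nn.functional.normalize); normalization is left
    # to the caller here.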

    return {"embedding": mean_pooled}