File size: 1,294 Bytes
e582574 0455608 e582574 0455608 e582574 6a20bf6 0455608 e582574 0455608 e582574 0455608 e582574 0455608 e582574 0455608 e582574 0455608 e582574 0455608 e582574 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
import logging
import os
import pandas as pd
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import dataset_lib.multimodal as multimodal
from dataset_lib.config import Config
from dataset_lib.config import Constants as c
from dataset_lib.datasets import get_dataset
# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)
@torch.no_grad()
def encode(config: Config, device=c.DEVICE, workdir=c.WORKDIR):
    """Encode each split of the configured dataset into image embeddings.

    For every split returned by ``get_dataset`` the images are passed through
    the configured backbone, L2-normalized, and written (with their labels)
    to a parquet file named ``<dataset>_<split>_<backbone>.parquet``.

    Args:
        config: Experiment configuration (dataset name, backbone, ...).
        device: Device the image encoder runs on.
        workdir: Directory the parquet files are written into.
    """
    # Lazy %-style args: formatting is skipped when INFO is disabled.
    logger.info(
        "Encoding dataset %s with backbone = %s",
        config.data.dataset.lower(),
        config.data.backbone,
    )
    datasets = get_dataset(config.data.dataset)
    encode_image = multimodal.get_image_encoder(config, device=device)
    for op, dataset in datasets.items():
        data = {"embedding": [], "label": []}
        for image, label in tqdm(dataset, desc=f"Encoding {op}"):
            embedding = encode_image(image).float()
            # L2-normalize so downstream cosine similarity reduces to a dot product.
            embedding /= torch.linalg.norm(embedding, dim=-1, keepdim=True)
            embedding = embedding.cpu().numpy()
            # NOTE(review): extend vs append assumes encode_image returns a
            # batch of exactly one embedding per image (shape (1, d)), so the
            # two columns stay the same length — confirm against the encoder.
            data["embedding"].extend(embedding)
            data["label"].append(label)
        df = pd.DataFrame(data)
        # BUG FIX: `workdir` was accepted but never used — os.path.join was
        # called with a single argument, silently writing into the CWD.
        data_path = os.path.join(
            workdir,
            f"{config.data.dataset.lower()}_{op}_{config.backbone_name()}.parquet",
        )
        df.to_parquet(data_path, index=False)
|