repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/__init__.py | zamba/models/__init__.py | from zamba.models.efficientnet_models import TimeDistributedEfficientNet # noqa: F401
from zamba.models.slowfast_models import SlowFast # noqa: F401
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/densepose/densepose_manager.py | zamba/models/densepose/densepose_manager.py | import json
import logging
from pathlib import Path
from typing import Optional
import cv2
try:
from densepose import add_densepose_config
from densepose.data.utils import get_class_to_mesh_name_mapping
from densepose.modeling.build import build_densepose_embedder
from densepose.structures.cse import DensePoseEmbeddingPredictorOutput
from densepose.vis.densepose_outputs_vertex import (
DensePoseOutputsTextureVisualizer,
DensePoseOutputsVertexVisualizer,
)
from densepose.vis.densepose_results_textures import get_texture_atlas
from densepose.vis.extractor import (
create_extractor,
)
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.structures.instances import Instances
DENSEPOSE_AVAILABLE = True
except ImportError:
DENSEPOSE_AVAILABLE = False
DensePoseOutputsTextureVisualizer = None # dummies for static defs
DensePoseOutputsVertexVisualizer = None
get_texture_atlas = lambda x: None # noqa: E731
from loguru import logger
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from zamba.data.video import load_video_frames
from zamba.models.utils import RegionEnum, download_weights
from zamba.settings import get_model_cache_dir
# Registry of available DensePose model definitions.
# Each entry specifies: the detectron2 config yaml, the public source URL for the
# pretrained weights, the cached-weights filename, and the visualizer class (plus
# kwargs) used to render outputs. NOTE: viz_class values are None when densepose
# is not installed (see the ImportError fallback above), so DensePoseManager
# re-checks DENSEPOSE_AVAILABLE before using them.
MODELS = dict(
    animals=dict(
        config=str(
            Path(__file__).parent
            / "assets"
            / "densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml"
        ),
        densepose_weights_url="https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k/270727112/model_final_421d28.pkl",
        weights="zamba_densepose_model_final_421d28.pkl",
        # generic vertex visualizer: colors pixels by mesh vertex
        viz_class=DensePoseOutputsVertexVisualizer,
        viz_class_kwargs=dict(),
    ),
    chimps=dict(
        config=str(
            Path(__file__).parent
            / "assets"
            / "densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml"
        ),
        densepose_weights_url="https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k/253146869/model_final_52f649.pkl",
        weights="zamba_densepose_model_final_52f649.pkl",
        # texture visualizer paints a labeled chimp texture atlas onto detections
        viz_class=DensePoseOutputsTextureVisualizer,
        viz_class_kwargs=dict(
            texture_atlases_dict={
                "chimp_5029": get_texture_atlas(
                    str(Path(__file__).parent / "assets" / "chimp_texture_colors_flipped.tif")
                )
            }
        ),
        # csv mapping anatomical part names to the BGR colors used in the atlas;
        # consumed by DensePoseManager.anatomize_image
        anatomy_color_mapping=str(Path(__file__).parent / "assets" / "chimp_5029_parts.csv"),
    ),
)
class DensePoseManager:
    """Runs DensePose inference, serialization, visualization, and anatomy
    summarization on images and videos using a model definition from MODELS.

    Requires the optional densepose/detectron2 dependencies; raises ImportError
    at construction time if they are not installed.
    """

    def __init__(
        self,
        model=MODELS["chimps"],
        model_cache_dir: Optional[Path] = None,
        download_region=RegionEnum("us"),
    ):
        """Create a DensePoseManager object.

        Parameters
        ----------
        model : dict, optional (default MODELS['chimps'])
            A dictionary with the densepose model defintion like those defined in MODELS.
        model_cache_dir : Path, optional
            Directory where model weights are cached; defaults to get_model_cache_dir().
        download_region : RegionEnum, optional
            Region from which to download weights when they are not cached.
        """
        if not DENSEPOSE_AVAILABLE:
            raise ImportError(
                "Densepose not installed. See: https://zamba.drivendata.org/docs/stable/models/densepose/#installation"
            )

        model_cache_dir = model_cache_dir or get_model_cache_dir()

        # setup configuration for densepose
        self.cfg = get_cfg()
        add_densepose_config(self.cfg)
        self.cfg.merge_from_file(model["config"])

        # download weights only if not already cached locally
        if not (model_cache_dir / model["weights"]).exists():
            logger.info(f"Available weights: {list(model_cache_dir.glob('*'))}")
            logger.info(f"Downloading weights {model['weights']} to {model_cache_dir}")
            model_cache_dir.mkdir(parents=True, exist_ok=True)
            self.cfg.MODEL.WEIGHTS = download_weights(
                model["weights"], model_cache_dir, download_region
            )
        else:
            self.cfg.MODEL.WEIGHTS = str(model_cache_dir / model["weights"])

        # automatically use CPU if no cuda available
        if not torch.cuda.is_available():
            self.cfg.MODEL.DEVICE = "cpu"
        self.cfg.freeze()

        logging.getLogger("fvcore").setLevel("CRITICAL")  # silence noisy detectron2 logging

        # set up predictor with the configuration
        self.predictor = DefaultPredictor(self.cfg)

        # we have a specific texture atlas for chimps with relevant regions
        # labeled that we can use instead of the default segmentation
        self.visualizer = model["viz_class"](
            self.cfg,
            device=self.cfg.MODEL.DEVICE,
            **model.get("viz_class_kwargs", {}),
        )

        # set up utilities for use with visualizer
        self.vis_extractor = create_extractor(self.visualizer)
        self.vis_embedder = build_densepose_embedder(self.cfg)
        self.vis_class_to_mesh_name = get_class_to_mesh_name_mapping(self.cfg)
        # precompute per-mesh vertex embeddings on the model device
        self.vis_mesh_vertex_embeddings = {
            mesh_name: self.vis_embedder(mesh_name).to(self.cfg.MODEL.DEVICE)
            for mesh_name in self.vis_class_to_mesh_name.values()
            if self.vis_embedder.has_embeddings(mesh_name)
        }

        # anatomy color mapping is only present for models (e.g. chimps) that
        # support per-part anatomy summaries
        if "anatomy_color_mapping" in model:
            self.anatomy_color_mapping = pd.read_csv(model["anatomy_color_mapping"], index_col=0)
        else:
            self.anatomy_color_mapping = None

    def predict_image(self, image):
        """Run inference to get the densepose results for an image.

        Parameters
        ----------
        image :
            numpy array (uint8) of an image in BGR format or path to an image

        Returns
        -------
        tuple
            Returns the image array as passed or loaded and the densepose Instances as results.
        """
        if isinstance(image, (str, Path)):
            image = read_image(image, format="BGR")
        return image, self.predict(image)

    def predict_video(self, video, video_loader_config=None, pbar=True):
        """Run inference to get the densepose results for a video.

        Parameters
        ----------
        video :
            numpy array (uint8) of a video in BGR layout with time dimension first or path to a video
        video_loader_config : VideoLoaderConfig, optional
            A video loader config for loading videos (uses all defaults except pix_fmt="bgr24")
        pbar : bool, optional
            Whether to display a progress bar, by default True

        Returns
        -------
        tuple
            Tuple of (video_array, list of densepose results per frame)
        """
        if isinstance(video, (str, Path)):
            video = load_video_frames(video, config=video_loader_config)

        # replace tqdm with a no-op passthrough when no progress bar is wanted
        pbar = tqdm if pbar else lambda x, **kwargs: x

        return video, [
            self.predict_image(img)[1] for img in pbar(video, desc="Frames")
        ]  # just the predictions

    def predict(self, image_arr):
        """Main call to DensePose for inference. Runs inference on an image array.

        Parameters
        ----------
        image_arr : numpy array
            BGR image array

        Returns
        -------
        Instances
            Detection instances with boxes, scores, and densepose estimates.
        """
        with torch.no_grad():
            instances = self.predictor(image_arr)["instances"]
        return instances

    def serialize_video_output(self, instances, filename=None, write_embeddings=False):
        """Serialize per-frame densepose results for a video to JSON-compatible
        Python objects; optionally write to `filename` as JSON.
        """
        serialized = {
            "frames": [
                self.serialize_image_output(
                    frame_instances, filename=None, write_embeddings=write_embeddings
                )
                for frame_instances in instances
            ]
        }

        if filename is not None:
            with Path(filename).open("w") as f:
                json.dump(serialized, f, indent=2)

        return serialized

    def serialize_image_output(self, instances, filename=None, write_embeddings=False):
        """Convert the densepose output into Python-native objects that can
        be written and read with json.

        Parameters
        ----------
        instances : Instance
            The output from the densepose model
        filename : (str, Path), optional
            If not None, the filename to write the output to, by default None
        write_embeddings : bool, optional
            If True (and densepose output is present), include the embedding and
            coarse segmentation tensors in the serialized output, by default False
        """
        # NOTE(review): the list branch only affects image_size; the subsequent
        # .get(...) calls assume `instances` is an Instances object — confirm the
        # list case can actually occur here.
        if isinstance(instances, list):
            img_height, img_width = instances[0].image_size
        else:
            img_height, img_width = instances.image_size

        boxes = instances.get("pred_boxes").tensor
        scores = instances.get("scores").tolist()
        labels = instances.get("pred_classes").tolist()

        try:
            pose_result = instances.get("pred_densepose")
        except KeyError:
            pose_result = None

        # include embeddings + segmentation if they exist and they are requested
        write_embeddings = write_embeddings and (pose_result is not None)

        serialized = {
            "instances": [
                {
                    "img_height": img_height,
                    "img_width": img_width,
                    "box": boxes[i].cpu().tolist(),
                    "score": scores[i],
                    "label": {
                        "value": labels[i],
                        "mesh_name": self.vis_class_to_mesh_name[labels[i]],
                    },
                    "embedding": (
                        pose_result.embedding[[i], ...].cpu().tolist()
                        if write_embeddings
                        else None
                    ),
                    "segmentation": (
                        pose_result.coarse_segm[[i], ...].cpu().tolist()
                        if write_embeddings
                        else None
                    ),
                }
                for i in range(len(instances))
            ]
        }

        if filename is not None:
            with Path(filename).open("w") as f:
                json.dump(serialized, f, indent=2)

        return serialized

    def deserialize_output(self, instances_dict=None, filename=None):
        """Reconstruct detectron2 Instances from serialized densepose output,
        either from a dict (`instances_dict`) or a JSON file (`filename`).

        Returns a single Instances for image input or a list of Instances
        (one per frame) for video input.
        """
        if filename is not None:
            with Path(filename).open("r") as f:
                instances_dict = json.load(f)

        # handle image case: wrap the single-image dict as a one-frame video
        is_image = False
        if "frames" not in instances_dict:
            instances_dict = {"frames": [instances_dict]}
            is_image = True

        frames = []
        for frame in instances_dict["frames"]:
            # NOTE(review): zip(*[...]) raises on a frame with zero instances —
            # confirm empty frames cannot reach this point.
            heights, widths, boxes, scores, labels, embeddings, segmentations = zip(
                *[
                    (
                        i["img_height"],
                        i["img_width"],
                        i["box"],
                        i["score"],
                        i["label"]["value"],
                        # missing embeddings/segmentations become nan placeholders
                        i["embedding"] if i["embedding"] is not None else [np.nan],
                        i["segmentation"] if i["segmentation"] is not None else [np.nan],
                    )
                    for i in frame["instances"]
                ]
            )

            frames.append(
                Instances(
                    (heights[0], widths[0]),
                    pred_boxes=boxes,
                    scores=scores,
                    pred_classes=labels,
                    pred_densepose=DensePoseEmbeddingPredictorOutput(
                        embedding=torch.tensor(embeddings),
                        coarse_segm=torch.tensor(segmentations),
                    ),
                )
            )

        # if image or single frame, just return the instance
        if is_image:
            return frames[0]
        else:
            return frames

    def visualize_image(self, image_arr, outputs, output_path=None):
        """Visualize the pose information.

        Parameters
        ----------
        image_arr : numpy array (uint8) BGR
            The numpy array representing the image.
        outputs :
            The outputs from running DensePoseManager.predict*
        output_path : str or Path, optional
            If not None, write visualization to this path; by default None

        Returns
        -------
        numpy array (uint8) BGR
            DensePose outputs visualized on top of the image.
        """
        # render on a grayscale copy so the colored overlay stands out
        bw_image = cv2.cvtColor(image_arr, cv2.COLOR_BGR2GRAY)
        bw_image = np.tile(bw_image[:, :, np.newaxis], [1, 1, 3])

        data = self.vis_extractor(outputs)
        image_vis = self.visualizer.visualize(bw_image, data)

        if output_path is not None:
            cv2.imwrite(str(output_path), image_vis)

        return image_vis

    def anatomize_image(self, visualized_img_arr, outputs, output_path=None):
        """Convert the pose information into the percent of pixels in the detection
        bounding box that correspond to each part of the anatomy in an image.

        Parameters
        ----------
        visualized_img_arr : numpy array (uint8) BGR
            The numpy array of the image after the texture has been visualized (by calling DensePoseManager.visualize_image).
        outputs :
            The outputs from running DensePoseManager.predict*
        output_path : str or Path, optional
            If not None, write the per-detection stats to this path as csv.

        Returns
        -------
        pandas.DataFrame
            DataFrame with percent of pixels of the bounding box that correspond to each anatomical part
        """
        if self.anatomy_color_mapping is None:
            raise ValueError(
                "No anatomy_color_mapping provided to track anatomy; did you mean to use a different MODEL?"
            )

        # no detections, return empty df for joining later (e.g., in anatomize_video)
        if not outputs:
            return pd.DataFrame([])

        _, _, N, bboxes_xywh, pred_classes = self.visualizer.extract_and_check_outputs_and_boxes(
            self.vis_extractor(outputs)
        )

        all_detections = []
        for n in range(N):
            x, y, w, h = bboxes_xywh[n].int().cpu().numpy()
            detection_area = visualized_img_arr[y : y + h, x : x + w]

            # count pixels matching each part's BGR color within the bounding box
            detection_stats = {
                name: (detection_area == np.array([[[color.B, color.G, color.R]]]))
                .all(axis=-1)
                .sum()
                / (h * w)  # calc percent of bounding box with this color
                for name, color in self.anatomy_color_mapping.iterrows()
            }

            detection_stats["x"] = x
            detection_stats["y"] = y
            detection_stats["h"] = h
            detection_stats["w"] = w

            all_detections.append(detection_stats)

        results = pd.DataFrame(all_detections)

        if output_path is not None:
            results.to_csv(output_path, index=False)

        return results

    def visualize_video(
        self, video_arr, outputs, output_path=None, frame_size=None, fps=30, pbar=True
    ):
        """Visualize the pose information on a video

        Parameters
        ----------
        video_arr : numpy array (uint8) BGR, time first
            The numpy array representing the video.
        outputs :
            The outputs from running DensePoseManager.predict*
        output_path : str or Path, optional
            If not None, write visualization to this path (should be .mp4); by default None
        frame_size : (int, float), optional
            If frame_size is float, scale up or down by that float value; if frame_size is an integer,
            set width to that size and scale height appropriately.
        fps : int
            frames per second for output video if writing; defaults to 30
        pbar : bool
            display a progress bar

        Returns
        -------
        numpy array (uint8) BGR
            DensePose outputs visualized on top of the image.
        """
        # replace tqdm with a no-op passthrough when no progress bar is wanted
        pbar = tqdm if pbar else lambda x, **kwargs: x

        out_frames = np.array(
            [
                self.visualize_image(
                    image_arr,
                    output,
                )
                for image_arr, output in pbar(
                    zip(video_arr, outputs), total=video_arr.shape[0], desc="Visualize frames"
                )
            ]
        )

        if output_path is not None:
            # get new size for output video if scaling
            if frame_size is None:
                frame_size = video_arr.shape[2]  # default to same size

            # NOTE(review): frame_width/frame_height are only bound in the float/int
            # branches below; any other frame_size type raises NameError — confirm
            # callers only pass None, float, or int.
            # if float, scale as a multiple
            if isinstance(frame_size, float):
                frame_width = round(video_arr.shape[2] * frame_size)
                frame_height = round(video_arr.shape[1] * frame_size)
            # if int, use as width of the video and scale height proportionally
            elif isinstance(frame_size, int):
                frame_width = frame_size
                scale = frame_width / video_arr.shape[2]
                frame_height = round(video_arr.shape[1] * scale)

            # setup output for writing
            output_path = output_path.with_suffix(".mp4")
            out = cv2.VideoWriter(
                str(output_path),
                cv2.VideoWriter_fourcc(*"mp4v"),
                max(1, int(fps)),
                (frame_width, frame_height),
            )

            for f in pbar(out_frames, desc="Write frames"):
                # resize only frames that don't already match the output size
                if (f.shape[0] != frame_height) or (f.shape[1] != frame_width):
                    f = cv2.resize(
                        f,
                        (frame_width, frame_height),
                        # https://stackoverflow.com/a/51042104/1692709
                        interpolation=(
                            cv2.INTER_LINEAR if f.shape[1] < frame_width else cv2.INTER_AREA
                        ),
                    )
                out.write(f)
            out.release()

        return out_frames

    def anatomize_video(self, visualized_video_arr, outputs, output_path=None, fps=30):
        """Convert the pose information into the percent of pixels in the detection
        bounding box that correspond to each part of the anatomy in a video.

        Parameters
        ----------
        visualized_video_arr : numpy array (uint8) BGR
            The numpy array of the video after the texture has been visualized (by calling DensePoseManager.visualize_video).
        outputs :
            The outputs from running DensePoseManager.predict*
        output_path : str or Path, optional
            If not None, write the concatenated per-frame stats to this path as csv.
        fps : int
            frames per second of the video; used to compute the `seconds` column.

        Returns
        -------
        pandas.DataFrame
            Per-detection anatomy stats for all frames, with `frame` and `seconds` columns.
        """
        all_detections = []
        for ix in range(visualized_video_arr.shape[0]):
            detection_df = self.anatomize_image(visualized_video_arr[ix, ...], outputs[ix])
            detection_df["frame"] = ix
            detection_df["seconds"] = ix / fps
            all_detections.append(detection_df)

        results = pd.concat(all_detections)

        if output_path is not None:
            results.to_csv(output_path, index=False)

        return results
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/densepose/config.py | zamba/models/densepose/config.py | from enum import Enum
import os
from pathlib import Path
from typing import Optional
from loguru import logger
import pandas as pd
from pydantic.class_validators import root_validator, validator
from tqdm import tqdm
from zamba.data.video import VideoLoaderConfig
from zamba.models.config import (
ZambaBaseModel,
check_files_exist_and_load,
get_video_filepaths,
validate_model_cache_dir,
)
from zamba.models.densepose.densepose_manager import MODELS, DensePoseManager
from zamba.models.utils import RegionEnum
class DensePoseOutputEnum(Enum):
    """Valid output types for DensePose: raw segmentation, or chimp anatomy
    summaries (requires the chimps model with an anatomy color mapping)."""

    segmentation = "segmentation"
    chimp_anatomy = "chimp_anatomy"
class DensePoseConfig(ZambaBaseModel):
    """Configuration for running dense pose on videos.

    Args:
        video_loader_config (VideoLoaderConfig): Configuration for loading videos
        output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy").
        render_output (bool): Whether to save a version of the video with the output overlaid on top.
            Defaults to False.
        embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the
            DensePose result. Setting to True can result in large json files. Defaults to False.
        data_dir (Path): Where to find the files listed in filepaths (or where to look if
            filepaths is not provided).
        filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.
        save_dir (Path, optional): Directory for where to save the output files;
            defaults to os.getcwd().
        cache_dir (Path, optional): Path for downloading and saving model weights. Defaults
            to env var `MODEL_CACHE_DIR` or the OS app cache dir.
        weight_download_region (RegionEnum, optional): region where to download weights; should
            be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.
    """

    video_loader_config: VideoLoaderConfig
    output_type: DensePoseOutputEnum
    render_output: bool = False
    embeddings_in_json: bool = False
    data_dir: Path
    filepaths: Optional[Path] = None
    save_dir: Optional[Path] = None
    cache_dir: Optional[Path] = None
    weight_download_region: RegionEnum = RegionEnum("us")

    _validate_cache_dir = validator("cache_dir", allow_reuse=True, always=True)(
        validate_model_cache_dir
    )

    def run_model(self):
        """Use this configuration to execute DensePose via the DensePoseManager"""
        if not isinstance(self.output_type, DensePoseOutputEnum):
            self.output_type = DensePoseOutputEnum(self.output_type)

        # BUGFIX: compare enum members directly. The previous code compared the
        # coerced enum member against `DensePoseOutputEnum.x.value` (a str);
        # an Enum member never equals its value, so the comparison was always
        # False and this method always raised "invalid ...".
        if self.output_type == DensePoseOutputEnum.segmentation:
            model = MODELS["animals"]
        elif self.output_type == DensePoseOutputEnum.chimp_anatomy:
            model = MODELS["chimps"]
        else:
            raise Exception(f"invalid {self.output_type}")

        output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir

        dpm = DensePoseManager(
            model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region
        )

        # after validation, self.filepaths is a dataframe with a `filepath` column
        for fp in tqdm(self.filepaths.filepath, desc="Videos"):
            fp = Path(fp)
            vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config)

            # serialize the labels generated by densepose to json
            output_path = output_dir / f"{fp.stem}_denspose_labels.json"
            dpm.serialize_video_output(
                labels, filename=output_path, write_embeddings=self.embeddings_in_json
            )

            # re-render the video with the densepose labels visualized on top of the video
            if self.render_output:
                output_path = output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}"
                visualized_video = dpm.visualize_video(
                    vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps
                )

                # write out the anatomy present in each frame to a csv for later analysis
                # (same enum-member comparison fix as above; anatomy output was
                # previously unreachable)
                if self.output_type == DensePoseOutputEnum.chimp_anatomy:
                    output_path = output_dir / f"{fp.stem}_denspose_anatomy.csv"
                    dpm.anatomize_video(
                        visualized_video,
                        labels,
                        output_path=output_path,
                        fps=self.video_loader_config.fps,
                    )

    _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(
        get_video_filepaths
    )

    @root_validator(skip_on_failure=True)
    def validate_files(cls, values):
        """Load the filepaths csv (if not already a dataframe), require a
        `filepath` column, drop duplicate rows, and check that files exist."""
        # if globbing from data directory, already have valid dataframe
        if isinstance(values["filepaths"], pd.DataFrame):
            files_df = values["filepaths"]
        else:
            # make into dataframe even if only one column for clearer indexing
            files_df = pd.DataFrame(pd.read_csv(values["filepaths"]))

        if "filepath" not in files_df.columns:
            raise ValueError(f"{values['filepaths']} must contain a `filepath` column.")

        # can only contain one row per filepath
        duplicated = files_df.filepath.duplicated()
        if duplicated.sum() > 0:
            logger.warning(
                f"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video."
            )
            files_df = files_df[["filepath"]].drop_duplicates()

        values["filepaths"] = check_files_exist_and_load(
            df=files_df,
            data_dir=values["data_dir"],
            skip_load_validation=True,
        )
        return values
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/densepose/__init__.py | zamba/models/densepose/__init__.py | from .densepose_manager import DensePoseManager, MODELS # noqa
from .config import DensePoseConfig, DensePoseOutputEnum # noqa
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/depth_estimation/depth_manager.py | zamba/models/depth_estimation/depth_manager.py | from loguru import logger
import numpy as np
import pandas as pd
from pathlib import Path
import torch
import torch.utils
import torch.utils.data
from torchvision import transforms
from torchvision.transforms import Resize
from tqdm import tqdm
from zamba.data.video import load_video_frames
from zamba.models.utils import RegionEnum, download_weights
from zamba.object_detection.yolox.megadetector_lite_yolox import MegadetectorLiteYoloX
from zamba.pytorch.transforms import ConvertHWCtoCHW
# Registry of depth estimation model definitions: the private source URL for the
# winning competition weights and the filename used in the local weights cache.
MODELS = dict(
    depth=dict(
        private_weights_url="s3://drivendata-client-zamba/depth_estimation_winner_weights/second_place/tf_efficientnetv2_l_in21k_2_5_pl4/model_best.pt",
        weights="zamba_depth_30aaa90525.pt",
    )
)
def depth_transforms(size):
    """Build the frame preprocessing pipeline for the depth model.

    Converts an HWC frame tensor to CHW layout, then resizes it to `size`
    (a (height, width) tuple).
    """
    steps = [
        ConvertHWCtoCHW(),  # put channels first
        Resize(size),  # resize to desired height and width
    ]
    return transforms.Compose(steps)
class DepthDataset(torch.utils.data.Dataset):
    """Dataset of animal detections for depth estimation.

    At construction, runs MegadetectorLiteYoloX over each video (sampled at
    1 fps), caches the resized frames in a +/- window around every frame with
    a detection, and records one entry per detection. Each item is a stack of
    window frames (channel-wise) for a single detection.
    """

    def __init__(self, filepaths):
        """Run detection over `filepaths` (iterable of video paths) and build
        the frame cache and detection index. Unloadable videos are skipped
        with a warning."""
        # these are hardcoded because they depend on the trained model weights used for inference
        self.height = 270
        self.width = 480
        self.channels = 3
        self.window_size = 2
        # first frames are swapped; this maintains the bug in the original code
        self.order = [-1, -2, 0, 1, 2]
        self.num_frames = self.window_size * 2 + 1
        self.fps = 1

        mdlite = MegadetectorLiteYoloX()
        cached_frames = dict()
        detection_dict = dict()
        transform = depth_transforms(size=(self.height, self.width))

        logger.info(f"Running object detection on {len(filepaths)} videos.")
        for video_filepath in tqdm(filepaths):
            # get video array at 1 fps, use full size for detecting objects
            logger.debug(f"Loading video: {video_filepath}")
            try:
                arr = load_video_frames(video_filepath, fps=self.fps)
            except:  # noqa: E722
                logger.warning(f"Video {video_filepath} could not be loaded. Skipping.")
                continue
            # add video entry to cached dict with length (number of seconds since fps=1)
            cached_frames[video_filepath] = dict(video_length=len(arr))
            # get detections in each frame
            logger.debug(f"Detecting video: {video_filepath}")
            detections_per_frame = mdlite.detect_video(video_arr=arr)

            # iterate over frames
            for frame_idx, (detections, scores) in enumerate(detections_per_frame):
                # if anything is detected in the frame, save out relevant frames
                if len(detections) > 0:
                    logger.debug(f"{len(detections)} detection(s) found at second {frame_idx}.")
                    # get frame indices around frame with detection
                    min_frame = frame_idx - self.window_size
                    max_frame = frame_idx + self.window_size

                    # add relevant resized frames to dict if not already added
                    # if index is before start or after end of video, use an array of zeros
                    # NOTE(review): only an out-of-range positive index raises here; a
                    # negative i wraps around and reuses frames from the end of the
                    # video instead of zeros — confirm whether this mirrors the
                    # original winning code or is unintended.
                    for i in range(min_frame, max_frame + 1):
                        if f"frame_{i}" not in cached_frames[video_filepath].keys():
                            try:
                                selected_frame = arr[i]
                            except:  # noqa: E722
                                selected_frame = np.zeros(
                                    (self.height, self.width, self.channels), dtype=int
                                )

                            # transform puts channels first and resizes
                            cached_frames[video_filepath][f"frame_{i}"] = transform(
                                torch.tensor(selected_frame)
                            ).numpy()
                            del selected_frame

                    # iterate over detections in frame to create universal detection ID
                    for i, (detection, score) in enumerate(zip(detections, scores)):
                        universal_det_id = f"{i}_{frame_idx}_{video_filepath}"
                        # save out bounding box and score info in case we want to mask out portions
                        detection_dict[universal_det_id] = dict(
                            bbox=detection,
                            score=score,
                            frame=frame_idx,
                            video=video_filepath,
                        )
            # free per-video arrays before loading the next video
            del arr
            del detections_per_frame

        self.detection_dict = detection_dict
        self.detection_indices = list(detection_dict.keys())
        self.cached_frames = cached_frames

    def __len__(self):
        # one item per detection (not per video or frame)
        return len(self.detection_indices)

    def __getitem__(self, index):
        """Given a detection index, returns a tuple containing the tensor of stacked frames,
        video filename, and time into the video for the target frame.
        """
        # get detection info
        detection_idx = self.detection_indices[index]
        det_metadata = self.detection_dict[detection_idx]
        det_frame = det_metadata["frame"]
        det_video = det_metadata["video"]

        # set up input array of frames within window of detection
        # frames are stacked channel-wise
        input = np.concatenate(
            [self.cached_frames[det_video][f"frame_{det_frame + i}"] for i in self.order]
        )

        # to tensor and normalize
        tensor = torch.from_numpy(input) / 255.0

        # keep track of video name and time as well
        return tensor, det_video, det_frame
class DepthEstimationManager:
    """Loads the TorchScript depth model (downloading weights if needed) and
    runs batched depth inference over detections found in videos."""

    def __init__(
        self,
        model_cache_dir: Path,
        gpus: int,
        weight_download_region: RegionEnum = RegionEnum("us"),
        batch_size: int = 64,
        tta: int = 2,
        num_workers: int = 8,
    ):
        """Create a depth estimation manager object

        Args:
            model_cache_dir (Path): Path for downloading and saving model weights.
            gpus (int): Number of GPUs to use for inference.
            weight_download_region (str): s3 region to download pretrained weights from.
                Options are "us" (United States), "eu" (Europe), or "asia" (Asia Pacific).
                Defaults to "us".
            batch_size (int, optional): Batch size to use for inference. Defaults to 64.
                Note: a batch is a set of frames, not videos, for the depth model.
            tta (int, optional): Number of flips to apply for test time augmentation.
            num_workers (int): Number of subprocesses to use for data loading. The maximum value is
                the number of CPUs in the system. Defaults to 8.
        """
        self.batch_size = batch_size
        self.tta = tta
        self.num_workers = num_workers
        self.gpus = gpus

        model = MODELS["depth"]
        self.model_weights = model_cache_dir / model["weights"]
        # download weights only if not already cached locally
        if not self.model_weights.exists():
            model_cache_dir.mkdir(parents=True, exist_ok=True)
            self.model_weights = download_weights(
                model["weights"], model_cache_dir, weight_download_region
            )

        if self.gpus > 0:
            self.device = "cuda"
        else:
            self.device = "cpu"

    def predict(self, filepaths):
        """Generate predictions for a list of video filepaths.

        Returns a DataFrame with one row per (video, second), with `distance`
        NaN where no animal was detected.
        """
        # load model (TorchScript archive; no model class definition needed)
        model = torch.jit.load(self.model_weights, map_location=self.device).eval()

        # load dataset
        test_dataset = DepthDataset(filepaths)
        test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            sampler=None,
            collate_fn=None,
            num_workers=self.num_workers,
            pin_memory=False,
            persistent_workers=True,
        )

        logger.info("Generating depth predictions for detected animals.")
        predictions = []
        with torch.no_grad():
            with tqdm(test_loader) as pbar:
                # reusable accumulator; sized to the full batch, zeroed per batch.
                # Only the first `bs` entries are meaningful for a short final batch;
                # the zip below truncates to `bs` so stale tail values are ignored.
                distance: torch.Tensor = torch.zeros(self.batch_size, device=self.device)
                for image, filepath, time in pbar:
                    bs = image.size(0)
                    image = image.to(self.device, non_blocking=True)
                    distance.zero_()

                    logits = model(image)
                    logits = logits.squeeze(1)
                    distance[:bs] += logits

                    # test-time augmentation: average with horizontally flipped input
                    if self.tta > 1:
                        logits = model(torch.flip(image, dims=[-1]))
                        logits = logits.squeeze(1)
                        distance[:bs] += logits

                    distance /= self.tta
                    time = time.numpy()
                    for d, vid, t in zip(distance.cpu().numpy(), filepath, time):
                        predictions.append((vid, t, d))

        predictions = pd.DataFrame(
            predictions,
            columns=["filepath", "time", "distance"],
        ).round(
            {"distance": 1}
        )  # round to useful number of decimal places

        logger.info("Processing output.")
        # post process to add nans for frames where no animal was detected
        videos = list(test_dataset.cached_frames.keys())
        lengths = [np.arange(test_dataset.cached_frames[v]["video_length"]) for v in videos]

        # create one row per frame for duration of video
        output = pd.Series(index=videos, data=lengths).explode().to_frame().reset_index()
        output.columns = ["filepath", "time"]

        # merge in predictions
        if len(predictions) > 0:
            output = output.merge(predictions, on=["filepath", "time"], how="outer")
        else:
            # create empty distance column
            output["distance"] = np.nan

        return output
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/depth_estimation/config.py | zamba/models/depth_estimation/config.py | import os
from pathlib import Path
from loguru import logger
import pandas as pd
from pydantic import DirectoryPath, FilePath, validator, root_validator
from typing import Optional, Union
from zamba.models.config import (
ZambaBaseModel,
check_files_exist_and_load,
get_video_filepaths,
GPUS_AVAILABLE,
validate_gpus,
validate_model_cache_dir,
)
from zamba.models.depth_estimation.depth_manager import DepthEstimationManager
from zamba.models.utils import RegionEnum
class DepthEstimationConfig(ZambaBaseModel):
"""Configuration for running depth estimation on videos.
Args:
filepaths (FilePath, optional): Path to a CSV containing videos for inference, with one row per
video in the data_dir. There must be a column called 'filepath' (absolute or
relative to the data_dir). If None, uses all files in data_dir. Defaults to None.
data_dir (DirectoryPath): Path to a directory containing videos for inference.
Defaults to the working directory.
save_to (Path or DirectoryPath, optional): Either a filename or a directory in which to
save the output csv. If a directory is provided, predictions will be saved to
depth_predictions.csv in that directory. Defaults to the working directory.
overwrite (bool): If True, overwrite output csv path if it exists. Defaults to False.
batch_size (int): Batch size to use for inference. Defaults to 64. Note: a batch is a set
of frames, not videos, for the depth model.
model_cache_dir (Path, optional): Path for downloading and saving model weights.
Defaults to env var `MODEL_CACHE_DIR` or the OS app cache dir.
weight_download_region (str): s3 region to download pretrained weights from. Options are
"us" (United States), "eu" (Europe), or "asia" (Asia Pacific). Defaults to "us".
num_workers (int): Number of subprocesses to use for data loading. The maximum value is
the number of CPUs in the system. Defaults to 8.
gpus (int): Number of GPUs to use for inference. Defaults to all of the available GPUs
found on the machine.
"""
filepaths: Optional[Union[FilePath, pd.DataFrame]] = None
data_dir: DirectoryPath = ""
save_to: Optional[Path] = None
overwrite: bool = False
batch_size: int = 64
model_cache_dir: Optional[Path] = None
weight_download_region: RegionEnum = RegionEnum("us")
num_workers: int = 8
gpus: int = GPUS_AVAILABLE
class Config:
# support pandas dataframe
arbitrary_types_allowed = True
def run_model(self):
dm = DepthEstimationManager(
model_cache_dir=self.model_cache_dir,
batch_size=self.batch_size,
weight_download_region=self.weight_download_region,
num_workers=self.num_workers,
gpus=self.gpus,
)
predictions = dm.predict(self.filepaths)
self.save_to.parent.mkdir(parents=True, exist_ok=True)
predictions.to_csv(self.save_to, index=False)
logger.info(f"Depth predictions saved to {self.save_to}")
_get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(
get_video_filepaths
)
_validate_cache_dir = validator("model_cache_dir", allow_reuse=True, always=True)(
validate_model_cache_dir
)
_validate_gpus = validator("gpus", allow_reuse=True, pre=True)(validate_gpus)
@root_validator(skip_on_failure=True)
def validate_save_to(cls, values):
save_to = values["save_to"]
if save_to is None:
save_path = Path(os.getcwd()) / "depth_predictions.csv"
elif save_to.suffix:
save_path = save_to
else:
save_path = save_to / "depth_predictions.csv"
if save_path.exists() and not values["overwrite"]:
raise ValueError(
f"{save_path} already exists. If you would like to overwrite, set overwrite=True."
)
values["save_to"] = save_path
return values
    @root_validator(skip_on_failure=True)
    def validate_files(cls, values):
        """Normalize `filepaths` into a validated list of existing video paths.

        Accepts either a dataframe (already globbed from `data_dir`) or a path to
        a CSV with a `filepath` column. Duplicate filepaths are dropped with a
        warning; file existence is checked but video loading is skipped.
        """
        # if globbing from data directory, already have valid dataframe
        if isinstance(values["filepaths"], pd.DataFrame):
            files_df = values["filepaths"]
        else:
            # make into dataframe even if only one column for clearer indexing
            files_df = pd.DataFrame(pd.read_csv(values["filepaths"]))
        if "filepath" not in files_df.columns:
            raise ValueError(f"{values['filepaths']} must contain a `filepath` column.")
        # can only contain one row per filepath
        duplicated = files_df.filepath.duplicated()
        if duplicated.sum() > 0:
            logger.warning(
                f"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates."
            )
        files_df = files_df[["filepath"]].drop_duplicates()
        values["filepaths"] = check_files_exist_and_load(
            df=files_df,
            data_dir=values["data_dir"],
            skip_load_validation=True,  # just check files exist
        ).filepath.values.tolist()
        return values
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/models/depth_estimation/__init__.py | zamba/models/depth_estimation/__init__.py | from .depth_manager import DepthDataset, DepthEstimationManager, MODELS # noqa
from .config import DepthEstimationConfig # noqa
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/dataloaders.py | zamba/pytorch/dataloaders.py | from typing import Optional, Tuple
import warnings
from loguru import logger
import numpy as np
import pandas as pd
from pandas_path import path # noqa: F401
import torch
import torchvision.datasets.video_utils
from torchvision.datasets.vision import VisionDataset
import torchvision.transforms.transforms
from zamba.data.video import npy_cache, load_video_frames, VideoLoaderConfig
def get_datasets(
    train_metadata: Optional[pd.DataFrame] = None,
    predict_metadata: Optional[pd.DataFrame] = None,
    transform: Optional[torchvision.transforms.transforms.Compose] = None,
    video_loader_config: Optional[VideoLoaderConfig] = None,
) -> Tuple[
    Optional["FfmpegZambaVideoDataset"],
    Optional["FfmpegZambaVideoDataset"],
    Optional["FfmpegZambaVideoDataset"],
    Optional["FfmpegZambaVideoDataset"],
]:
    """Gets training and/or prediction datasets.

    Args:
        train_metadata (pathlike, optional): Path to a CSV or DataFrame with columns:
            - filepath: path to a video, relative to `video_dir`
            - label:, label of the species that appears in the video
            - split (optional): If provided, "train", "val", or "holdout" indicating which dataset
                split the video will be included in. If not provided, and a "site" column exists,
                generate a site-specific split. Otherwise, generate a random split using
                `split_proportions`.
            - site (optional): If no "split" column, generate a site-specific split using the values
                in this column.
        predict_metadata (pathlike, optional): Path to a CSV or DataFrame with a "filepath" column.
        transform (torchvision.transforms.transforms.Compose, optional)
        video_loader_config (VideoLoaderConfig, optional)

    Returns:
        A tuple of (train_dataset, val_dataset, test_dataset, predict_dataset) where each dataset
        can be None if not specified.
    """
    if predict_metadata is not None:
        # enable filtering the same way on all datasets; prediction targets are
        # dummies (all zeros) so FfmpegZambaVideoDataset sees a species column
        predict_metadata["species_"] = 0

    def subset_metadata_or_none(
        metadata: Optional[pd.DataFrame] = None, subset: Optional[str] = None
    ) -> Optional[pd.DataFrame]:
        # returns a dataset for the rows in `subset`, or None when there are no
        # rows (or no metadata at all); subset=None means "use all rows"
        if metadata is None:
            return None
        else:
            metadata_subset = metadata.loc[metadata.split == subset] if subset else metadata
            if len(metadata_subset) > 0:
                return FfmpegZambaVideoDataset(
                    annotations=metadata_subset.set_index("filepath").filter(regex="species"),
                    transform=transform,
                    video_loader_config=video_loader_config,
                )
            else:
                return None

    train_dataset = subset_metadata_or_none(train_metadata, "train")
    val_dataset = subset_metadata_or_none(train_metadata, "val")
    test_dataset = subset_metadata_or_none(train_metadata, "holdout")
    predict_dataset = subset_metadata_or_none(predict_metadata)
    return train_dataset, val_dataset, test_dataset, predict_dataset
class FfmpegZambaVideoDataset(VisionDataset):
    """Dataset that loads videos with ffmpeg (via `load_video_frames`) and
    returns (video_tensor, one_hot_target) pairs.

    Args:
        annotations (pd.DataFrame): Indexed by filepath, with one-hot
            `species_*` columns as targets.
        transform (Compose, optional): Applied to the loaded video tensor.
        video_loader_config (VideoLoaderConfig, optional): Frame
            selection/loading options. Defaults to `VideoLoaderConfig()`.
    """

    def __init__(
        self,
        annotations: pd.DataFrame,
        transform: Optional[torchvision.transforms.transforms.Compose] = None,
        video_loader_config: Optional[VideoLoaderConfig] = None,
    ):
        self.original_indices = annotations.index
        self.video_paths = annotations.index.tolist()
        # species names come from stripping the "species_" column prefix
        self.species = [s.split("species_", 1)[1] for s in annotations.columns]
        self.targets = annotations
        self.transform = transform
        # get environment variable for cache if it exists
        # (VideoLoaderConfig's validator reads VIDEO_CACHE_DIR)
        if video_loader_config is None:
            video_loader_config = VideoLoaderConfig()
        self.video_loader_config = video_loader_config
        super().__init__(root=None, transform=transform)

    def __len__(self):
        return len(self.video_paths)

    def __getitem__(self, index: int):
        """Load one video (optionally through the npy cache); on load failure,
        fall back to an all-zeros array of the configured shape."""
        try:
            cached_load_video_frames = npy_cache(
                cache_path=self.video_loader_config.cache_dir,
                cleanup=self.video_loader_config.cleanup_cache,
            )(load_video_frames)
            video = cached_load_video_frames(
                filepath=self.video_paths[index], config=self.video_loader_config
            )
        except Exception as e:
            # re-raise IndexError so out-of-range indices are not masked
            if isinstance(e, IndexError):
                raise
            # show ffmpeg error
            logger.debug(e)
            logger.warning(
                f"Video {self.video_paths[index]} could not be loaded. Using an array of all zeros instead."
            )
            # zero array shaped like the configured model input; falls back to
            # frame-selection dims when model input dims are unset
            video = np.zeros(
                (
                    self.video_loader_config.total_frames,
                    (
                        self.video_loader_config.model_input_height
                        if self.video_loader_config.model_input_height is not None
                        else self.video_loader_config.frame_selection_height
                    ),
                    (
                        self.video_loader_config.model_input_width
                        if self.video_loader_config.model_input_width is not None
                        else self.video_loader_config.frame_selection_width
                    ),
                    3,
                ),
                dtype="int",
            )
        # ignore pytorch warning about non-writeable tensors
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            video = torch.from_numpy(video)
        if self.transform is not None:
            video = self.transform(video)
        target = self.targets.iloc[index]
        target = torch.tensor(target).float()
        return video, target
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/finetuning.py | zamba/pytorch/finetuning.py | from typing import Optional
import pytorch_lightning as pl
def multiplier_factory(rate: float):
    """Build a function that always returns `rate`, for use as a constant
    learning-rate multiplier.

    Args:
        rate (float): The constant value returned on every call.
    """

    def constant_multiplier(*args, **kwargs):
        return rate

    return constant_multiplier
class BackboneFinetuning(pl.callbacks.finetuning.BackboneFinetuning):
    r"""
    Derived from PTL's built-in ``BackboneFinetuning``, but during the backbone freeze phase,
    choose whether to freeze batch norm layers, even if ``train_bn`` is True (i.e., even if we train them
    during the backbone unfreeze phase).

    Finetune a backbone model based on a learning rate user-defined scheduling.
    When the backbone learning rate reaches the current model learning rate
    and ``should_align`` is set to True, it will align with it for the rest of the training.

    Args:
        unfreeze_backbone_at_epoch: Epoch at which the backbone will be unfrozen.
        lambda_func: Scheduling function for increasing backbone learning rate.
        backbone_initial_ratio_lr:
            Used to scale down the backbone learning rate compared to rest of model
        backbone_initial_lr: Optional, initial learning rate for the backbone.
            By default, we will use current_learning / backbone_initial_ratio_lr
        multiplier: If provided, replaces ``lambda_func`` with a constant multiplier.
        pre_train_bn: Whether to train batch norm layers during the backbone freeze phase.
        should_align: Whether to align with current learning rate when backbone learning
            reaches it.
        initial_denom_lr: When unfreezing the backbone, the initial learning rate will be
            current_learning_rate / initial_denom_lr.
        train_bn: Whether to make Batch Normalization trainable.
        verbose: Display current learning rate for model and backbone
        round: Precision for displaying learning rate

    Example::

        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.callbacks import BackboneFinetuning
        >>> multiplicative = lambda epoch: 1.5
        >>> backbone_finetuning = BackboneFinetuning(200, multiplicative)
        >>> trainer = Trainer(callbacks=[backbone_finetuning])
    """

    def __init__(
        self, *args, multiplier: Optional[float] = 1, pre_train_bn: bool = False, **kwargs
    ):
        # a constant multiplier (default 1) takes precedence over any lambda_func
        if multiplier is not None:
            kwargs["lambda_func"] = multiplier_factory(multiplier)
        super().__init__(*args, **kwargs)
        # choose whether to train batch norm layers prior to finetuning phase
        self.pre_train_bn = pre_train_bn

    def freeze_before_training(self, pl_module: "pl.LightningModule"):
        # freeze the backbone; BN layers stay trainable only if pre_train_bn is set
        self.freeze(pl_module.backbone, train_bn=self.pre_train_bn)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/utils.py | zamba/pytorch/utils.py | from typing import Optional, Tuple
import torch
def build_multilayer_perceptron(
    input_size: int,
    hidden_layer_sizes: Optional[Tuple[int]],
    output_size: int,
    activation: Optional[torch.nn.Module] = torch.nn.ReLU,
    dropout: Optional[float] = None,
    output_dropout: Optional[float] = None,
    output_activation: Optional[torch.nn.Module] = None,
) -> torch.nn.Sequential:
    """Builds a multilayer perceptron.

    Args:
        input_size (int): Size of first input layer.
        hidden_layer_sizes (tuple of int, optional): If provided, size of hidden layers.
        output_size (int): Size of the last output layer.
        activation (torch.nn.Module, optional): Activation layer between each pair of layers.
        dropout (float, optional): If provided, insert dropout layers with the following dropout
            rate in between each pair of layers.
        output_dropout (float, optional): If provided, insert a dropout layer with the following
            dropout rate before the output.
        output_activation (torch.nn.Module, optional): Activation layer after the final layer.

    Returns:
        torch.nn.Sequential (or a bare torch.nn.Linear when there are no hidden layers)
    """
    if (hidden_layer_sizes is None) or len(hidden_layer_sizes) == 0:
        # no hidden layers: a single linear map, no activation/dropout
        return torch.nn.Linear(input_size, output_size)

    def _append_activation_and_dropout(layers):
        # shared activation + (optional) dropout pattern after each hidden linear layer
        if activation is not None:
            layers.append(activation())
        if (dropout is not None) and (dropout > 0):
            layers.append(torch.nn.Dropout(dropout))

    layers = [torch.nn.Linear(input_size, hidden_layer_sizes[0])]
    _append_activation_and_dropout(layers)
    for in_size, out_size in zip(hidden_layer_sizes[:-1], hidden_layer_sizes[1:]):
        layers.append(torch.nn.Linear(in_size, out_size))
        _append_activation_and_dropout(layers)
    layers.append(torch.nn.Linear(hidden_layer_sizes[-1], output_size))
    if (output_dropout is not None) and (output_dropout > 0):
        # BUG FIX: previously passed `dropout` here, which ignored the requested
        # output_dropout rate and raised TypeError when dropout was None
        layers.append(torch.nn.Dropout(output_dropout))
    if output_activation is not None:
        layers.append(output_activation())
    return torch.nn.Sequential(*layers)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/__init__.py | zamba/pytorch/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/transforms.py | zamba/pytorch/transforms.py | import itertools
from typing import Optional, Tuple
import torch
from torchvision import transforms
from torchvision.transforms import Normalize
class ConvertTHWCtoCTHW(torch.nn.Module):
    """Move the channel axis first: (0:T, 1:H, 2:W, 3:C) -> (3:C, 0:T, 1:H, 2:W)."""

    def forward(self, vid: torch.Tensor) -> torch.Tensor:
        reordered = vid.permute(3, 0, 1, 2)
        return reordered
class ConvertTHWCtoTCHW(torch.nn.Module):
    """Swap channels to the second axis: (T, H, W, C) -> (T, C, H, W)."""

    def forward(self, vid: torch.Tensor) -> torch.Tensor:
        reordered = vid.permute(0, 3, 1, 2)
        return reordered
class ConvertTCHWtoCTHW(torch.nn.Module):
    """Swap the first two axes: (T, C, H, W) -> (C, T, H, W)."""

    def forward(self, vid: torch.Tensor) -> torch.Tensor:
        reordered = vid.permute(1, 0, 2, 3)
        return reordered
class ConvertHWCtoCHW(torch.nn.Module):
    """Move the channel axis first for a single image: (0:H, 1:W, 2:C) -> (2:C, 0:H, 1:W)."""

    def forward(self, vid: torch.Tensor) -> torch.Tensor:
        reordered = vid.permute(2, 0, 1)
        return reordered
class Uint8ToFloat(torch.nn.Module):
    """Rescale uint8 pixel values to floats in [0, 1]."""

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        scaled = tensor / 255.0
        return scaled
class VideotoImg(torch.nn.Module):
    """Drop a singleton leading time axis, turning a one-frame video into an image."""

    def forward(self, vid: torch.Tensor) -> torch.Tensor:
        return torch.squeeze(vid, dim=0)
class PadDimensions(torch.nn.Module):
    """Pads a tensor so selected axes reach a minimum size.

    Attributes:
        dimension_sizes: Tuple with one entry per tensor dimension. An int
            entry pads that axis up to at least that size; None leaves the
            axis untouched.
    """

    def __init__(self, dimension_sizes: Tuple[Optional[int]]):
        super().__init__()
        self.dimension_sizes = dimension_sizes

    @staticmethod
    def compute_left_and_right_pad(original_size: int, padded_size: int) -> Tuple[int, int]:
        """Split the padding needed for one axis into a (left, right) pair.

        For an odd total, the right side receives the extra element. Axes
        already at or above the target size get (0, 0).
        """
        if original_size >= padded_size:
            return 0, 0
        shortfall = padded_size - original_size
        half, extra = divmod(shortfall, 2)
        return half, half + extra

    def forward(self, vid: torch.Tensor) -> torch.Tensor:
        pad_pairs = []
        for current, target in zip(vid.shape, self.dimension_sizes):
            if target is None:
                pad_pairs.append((0, 0))
            else:
                pad_pairs.append(self.compute_left_and_right_pad(current, target))
        # torch.nn.functional.pad wants pads for the *last* axis first; the
        # full reversal (matching the original implementation) also swaps the
        # left/right amounts within each pair
        flat = [amount for pair in pad_pairs for amount in pair]
        return torch.nn.functional.pad(vid, flat[::-1])
class PackSlowFastPathways(torch.nn.Module):
    """Builds the [slow, fast] pathway input list expected by the SlowFast model."""

    def __init__(self, alpha: int = 4):
        super().__init__()
        self.alpha = alpha

    def forward(self, frames: torch.Tensor):
        # the fast pathway keeps every frame; the slow pathway subsamples the
        # time axis (dim 1) down to 1/alpha of the frames, evenly spaced
        n_frames = frames.shape[1]
        slow_indices = torch.linspace(0, n_frames - 1, n_frames // self.alpha).long()
        slow_pathway = torch.index_select(frames, 1, slow_indices)
        return [slow_pathway, frames]
imagenet_normalization_values = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def zamba_image_model_transforms(
    single_frame=False, normalization_values=imagenet_normalization_values, channels_first=False
):
    """Build the standard transform pipeline for zamba image-based models.

    Args:
        single_frame (bool): If True, squeeze out the time dimension so the
            output is a single image rather than a one-frame video.
        normalization_values (dict): `mean`/`std` kwargs for
            `transforms.Normalize`. Defaults to ImageNet statistics.
        channels_first (bool): If True, append a transform producing
            channels-first (C, T, H, W) output instead of (T, C, H, W).

    Returns:
        transforms.Compose
    """
    img_transforms = [
        ConvertTHWCtoTCHW(),
        Uint8ToFloat(),
        # BUG FIX: previously hard-coded imagenet_normalization_values here,
        # silently ignoring the normalization_values argument
        transforms.Normalize(**normalization_values),
    ]
    if single_frame:
        img_transforms += [VideotoImg()]  # squeeze dim
    if channels_first:
        img_transforms += [ConvertTCHWtoCTHW()]
    return transforms.Compose(img_transforms)
def slowfast_transforms():
    """Transform pipeline for the SlowFast model: reorder axes, scale to float,
    normalize, pad the time axis to 32 frames, then split into slow/fast pathways."""
    steps = [
        ConvertTHWCtoTCHW(),
        Uint8ToFloat(),
        Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225]),
        ConvertTCHWtoCTHW(),
        PadDimensions((None, 32, None, None)),
        PackSlowFastPathways(),
    ]
    return transforms.Compose(steps)
def resize_and_pad(image, desired_size):
    """Resize a PIL image so its longer side equals `desired_size` (keeping the
    aspect ratio, with a minimum short side of 2 px), then center-pad the short
    side to return a square `desired_size` x `desired_size` image."""
    width, height = image.size
    if width > height:
        new_width = desired_size
        new_height = max(int(desired_size * (height / width)), 2)
    else:
        new_height = desired_size
        new_width = max(int(desired_size * (width / height)), 2)
    resized = image.resize((new_width, new_height))
    # split the leftover space evenly; the right/bottom get the odd pixel
    pad_left = (desired_size - new_width) // 2
    pad_right = desired_size - new_width - pad_left
    pad_top = (desired_size - new_height) // 2
    pad_bottom = desired_size - new_height - pad_top
    return transforms.functional.pad(resized, (pad_left, pad_top, pad_right, pad_bottom))
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/pytorch/layers.py | zamba/pytorch/layers.py | import torch
def _stack_tups(tuples, stack_dim=1):
"""Stack tuple of tensors along `stack_dim`
NOTE: vendored (with minor adaptations) from fastai:
https://github.com/fastai/fastai/blob/4b0785254fdece1a44859956b6e54eedb167a97e/fastai/layers.py#L505-L507
Updates:
- use `range` rather than fastai `range_of`
"""
return tuple(torch.stack([t[i] for t in tuples], dim=stack_dim) for i in range(len(tuples[0])))
class TimeDistributed(torch.nn.Module):
    """Applies `module` over `tdim` identically for each step, use `low_mem` to compute one at a time.

    NOTE: vendored (with minor adaptations) from fastai:
    https://github.com/fastai/fastai/blob/4b0785254fdece1a44859956b6e54eedb167a97e/fastai/layers.py#L510-L544

    Updates:
    - super.__init__() in init
    - assign attributes in init
    - inherit from torch.nn.Module rather than fastai.Module
    - BUG FIX: low_mem_forward now forwards **kwargs to the wrapped module
      (previously they were passed to list.append, a TypeError)
    """

    def __init__(self, module, low_mem=False, tdim=1):
        super().__init__()
        self.low_mem = low_mem
        self.tdim = tdim
        self.module = module

    def forward(self, *tensors, **kwargs):
        "input x with shape:(bs,seq_len,channels,width,height)"
        if self.low_mem or self.tdim != 1:
            return self.low_mem_forward(*tensors, **kwargs)
        else:
            # only support tdim=1: fold (bs, seq_len) into one batch axis,
            # apply the module once, then unfold
            inp_shape = tensors[0].shape
            bs, seq_len = inp_shape[0], inp_shape[1]
            out = self.module(*[x.view(bs * seq_len, *x.shape[2:]) for x in tensors], **kwargs)
            return self.format_output(out, bs, seq_len)

    def low_mem_forward(self, *tensors, **kwargs):
        "input x with shape:(bs,seq_len,channels,width,height)"
        seq_len = tensors[0].shape[self.tdim]
        args_split = [torch.unbind(x, dim=self.tdim) for x in tensors]
        out = []
        for i in range(seq_len):
            # apply the module to one time step at a time to bound memory use
            out.append(self.module(*[args[i] for args in args_split], **kwargs))
        if isinstance(out[0], tuple):
            return _stack_tups(out, stack_dim=self.tdim)
        return torch.stack(out, dim=self.tdim)

    def format_output(self, out, bs, seq_len):
        "unstack from batchsize outputs"
        if isinstance(out, tuple):
            return tuple(out_i.view(bs, seq_len, *out_i.shape[1:]) for out_i in out)
        return out.view(bs, seq_len, *out.shape[1:])

    def __repr__(self):
        return f"TimeDistributed({self.module})"
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/data/metadata.py | zamba/data/metadata.py | import itertools
from uuid import uuid4
from loguru import logger
import numpy as np
import pandas as pd
from typing import Dict, Optional, Union
def roundrobin(*iterables):
    "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
    # From https://docs.python.org/3/library/itertools.html#recipes
    # Recipe credited to George Sakkis
    remaining = len(iterables)
    getters = itertools.cycle(iter(source).__next__ for source in iterables)
    while remaining:
        try:
            for getter in getters:
                yield getter()
        except StopIteration:
            # drop the exhausted iterator and keep cycling the rest
            remaining -= 1
            getters = itertools.cycle(itertools.islice(getters, remaining))
def create_site_specific_splits(
    site: pd.Series,
    proportions: Dict[str, int],
    random_state: Optional[Union[int, np.random.mtrand.RandomState]] = 989,
):
    """Splits sites into distinct groups whose sizes roughly matching the given proportions. Null
    sites are randomly assigned to groups using the provided proportions.

    Args:
        site (pd.Series): A series of sites, one element per observation.
        proportions (dict): A dict whose keys are the resulting groups and whose values are the
            rough proportion of data in each group.
        random_state (int or RandomState, optional): Seed (or generator) for the random
            assignment of null sites.

    Example:
        Split data into groups where each site is in one and only one group with roughly 50-25-25
        train-val-holdout proportions.

        >>> create_site_specific_splits(site, proportions={"train": 2, "val": 1, "holdout": 1})

    Returns:
        pd.Series: A series containing the resulting split, one element per observation.
    """
    assignments = {}
    # iterate sites from most to fewest observations so large sites are spread
    # across groups in proportion
    sites = site.value_counts(dropna=True).sort_values(ascending=False).index
    n_subgroups = sum(proportions.values())
    for i, subset in enumerate(
        roundrobin(*([subset] * proportions[subset] for subset in proportions))
    ):
        # every n_subgroups-th site (offset i) goes to this subset
        for group in sites[i::n_subgroups]:
            assignments[group] = subset

    # Divide null sites among the groups
    null_sites = site.isnull()
    if null_sites.sum() > 0:
        logger.debug(f"{null_sites.sum():,} null sites randomly assigned to groups.")
        # create a unique placeholder "site" per group so null rows can be
        # mapped through the same assignments dict
        null_groups = []
        for group, group_proportion in proportions.items():
            null_group = f"{group}-{uuid4()}"
            null_groups.append(null_group)
            assignments[null_group] = group

        rng = (
            np.random.RandomState(random_state) if isinstance(random_state, int) else random_state
        )
        site = site.copy()
        site.loc[null_sites] = rng.choice(
            null_groups,
            p=np.asarray(list(proportions.values())) / sum(proportions.values()),
            size=null_sites.sum(),
            replace=True,
        )
    return site.replace(assignments)
def one_hot_to_labels(
    one_hot: pd.DataFrame, column_prefix: Optional[str] = r"species_"
) -> pd.DataFrame:
    """Convert a one-hot encoded dataframe (indexed by filepath) into a long
    (filepath, label) dataframe with one row per positive entry."""
    if column_prefix:
        one_hot = one_hot.filter(regex=column_prefix)
        # remove prefix
        one_hot.columns = [name.split(column_prefix, 1)[1] for name in one_hot.columns]
    one_hot.index = one_hot.index.rename("filepath")
    one_hot.columns = one_hot.columns.rename("label")
    stacked = (one_hot == 1).stack()
    positives = stacked[stacked]
    return positives.reset_index().drop(0, axis=1)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/data/__init__.py | zamba/data/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/data/video.py | zamba/data/video.py | from fractions import Fraction
from functools import reduce
import hashlib
import json
from math import floor
import os
from pathlib import Path
import subprocess
from shutil import rmtree
import tempfile
from typing import Optional, Union, List
import cv2
from cloudpathlib import S3Path, AnyPath
import ffmpeg
from loguru import logger
import numpy as np
import pandas as pd
from pydantic import BaseModel, root_validator, validator
from zamba.exceptions import ZambaFfmpegException
from zamba.object_detection.yolox.megadetector_lite_yolox import (
MegadetectorLiteYoloX,
MegadetectorLiteYoloXConfig,
)
from zamba.settings import IMAGE_SUFFIXES
def ffprobe(path: os.PathLike) -> pd.Series:
    """Run ffprobe on a video and return its stream/format metadata as a
    flattened pandas Series (nested keys joined with '.')."""

    def flatten_json(node, prefix=""):
        # recursively yield {dotted_key: value} dicts for a nested json object
        for key in node:
            value = node[key]
            if isinstance(value, dict):
                yield from flatten_json(value, f"{prefix}.{key}")
            elif isinstance(value, list):
                for idx in range(len(value)):
                    yield from flatten_json(value[idx], f"{prefix}.{key}[{idx}]")
            else:
                yield {f"{prefix}.{key}".strip("."): value}

    command = [
        "ffprobe",
        "-v",
        "quiet",
        "-show_entries",
        "stream:format",
        "-select_streams",
        "v",
        "-of",
        "json",
        path,
    ]
    parsed = json.loads(subprocess.check_output(command))
    merged = reduce(lambda a, b: {**a, **b}, flatten_json(parsed))
    return pd.Series(merged)
def get_video_stream(path: Union[os.PathLike, S3Path]) -> dict:
    """Return the first video stream's ffprobe metadata for `path`.

    Raises:
        ZambaFfmpegException: if ffprobe fails on the file.

    Note: despite the `dict` annotation, returns None when the file has no
    video stream.
    """
    try:
        probe = ffmpeg.probe(str(path))
    except ffmpeg.Error as exc:
        raise ZambaFfmpegException(exc.stderr)
    return next((stream for stream in probe["streams"] if stream["codec_type"] == "video"), None)
def num_frames(stream_or_path: Union[dict, os.PathLike, S3Path]) -> Optional[int]:
    """Estimate the number of frames in a video.

    Accepts either an ffprobe video-stream dict or a path (probed on demand).
    Prefers the container-reported `nb_frames`; otherwise estimates from
    duration (minus `start_time`) times the frame rate. Returns None when the
    stream is missing or no estimate can be made.
    """
    if not isinstance(stream_or_path, dict):
        stream = get_video_stream(stream_or_path)
    else:
        stream = stream_or_path
    if not stream:
        return None
    if "nb_frames" in stream:
        return int(stream["nb_frames"])
    if "duration" in stream:
        duration = float(stream["duration"])
        # frame rates are reported as fraction strings, e.g. "30000/1001"
        if "r_frame_rate" in stream:
            frame_rate = float(Fraction(stream["r_frame_rate"]))
        elif "avg_frame_rate" in stream:
            # BUG FIX: previously parsed with float(), which fails on
            # fractional strings like "30000/1001" (cf. the r_frame_rate branch)
            frame_rate = float(Fraction(stream["avg_frame_rate"]))
        else:
            # no frame rate reported; cannot estimate
            return None
        duration -= float(stream.get("start_time", 0))
        return floor(duration * frame_rate)
def ensure_frame_number(arr, total_frames: int):
    """Force `arr` to contain exactly `total_frames` frames.

    Clips extra frames from the end, or repeats the final frame to pad. An
    empty selection yields an all-zero array of the target shape. If
    `total_frames` is None or already matches, `arr` is returned unchanged.

    Args:
        arr (np.ndarray): Video frames with shape (frames, height, width, channel).
        total_frames (int): Desired number of frames in the output array.
    """
    n = arr.shape[0]
    if total_frames is None or n == total_frames:
        return arr
    if n == 0:
        logger.warning(
            "No frames selected. Returning an array in the desired shape with all zeros."
        )
        return np.zeros((total_frames, arr.shape[1], arr.shape[2], arr.shape[3]), dtype="int")
    if n > total_frames:
        logger.info(
            f"Clipping {n - total_frames} frames "
            f"(original: {n}, requested: {total_frames})."
        )
        return arr[:total_frames]
    logger.info(
        f"Duplicating last frame {total_frames - n} times "
        f"(original: {n}, requested: {total_frames})."
    )
    repeats = np.tile(arr[-1], (total_frames - n, 1, 1, 1))
    return np.concatenate([arr, repeats], axis=0)
def get_frame_time_estimates(path: os.PathLike):
    """Return ffmpeg's best-effort timestamp, in seconds, for every frame in the video."""
    probe = ffmpeg.probe(str(path), show_entries="frame=best_effort_timestamp_time")
    return [float(x["best_effort_timestamp_time"]) for x in probe["frames"]]
class VideoMetadata(BaseModel):
    """Basic metadata for a video file, as reported by ffprobe."""

    height: int  # pixels
    width: int  # pixels
    n_frames: int  # container-reported frame count (nb_frames)
    duration_s: float  # duration in seconds
    fps: int  # reported r_frame_rate (not average), truncated to int

    @classmethod
    def from_video(cls, path: os.PathLike):
        """Probe `path` with ffprobe and build a VideoMetadata from its video stream."""
        stream = get_video_stream(path)
        return cls(
            height=int(stream["height"]),
            width=int(stream["width"]),
            n_frames=int(stream["nb_frames"]),
            duration_s=float(stream["duration"]),
            fps=int(Fraction(stream["r_frame_rate"])),  # reported, not average
        )
class VideoLoaderConfig(BaseModel):
    """
    Configuration for load_video_frames.

    Args:
        crop_bottom_pixels (int, optional): Number of pixels to crop from the bottom of the video
            (prior to resizing to `video_height`).
        i_frames (bool, optional): Only load the I-Frames. See
            https://en.wikipedia.org/wiki/Video_compression_picture_types#Intra-coded_(I)_frames/slices_(key_frames)
        scene_threshold (float, optional): Only load frames that correspond to scene changes.
            See http://www.ffmpeg.org/ffmpeg-filters.html#select_002c-aselect
        megadetector_lite_config (MegadetectorLiteYoloXConfig, optional): Configuration of
            MegadetectorLiteYoloX frame selection model.
        frame_selection_height (int, optional): Resize the video to this height in pixels, prior to
            frame selection. If None, the full size video will be used for frame selection. Using full
            size images (setting to None) is recommended for MegadetectorLite, especially if your
            species of interest are smaller.
        frame_selection_width (int, optional): Resize the video to this width in pixels, prior to
            frame selection.
        total_frames (int, optional): Number of frames that should ultimately be returned.
        ensure_total_frames (bool): Selecting the number of frames by resampling may result in one
            more or fewer frames due to rounding. If True, ensure the requested number of frames
            is returned by either clipping or duplicating the final frame. Raises an error if no
            frames have been selected. Otherwise, return the array unchanged.
        fps (float, optional): Resample the video evenly from the entire duration to a specific
            number of frames per second.
        early_bias (bool, optional): Resamples to 24 fps and selects 16 frames biased toward the
            front (strategy used by competition winner).
        frame_indices (list(int), optional): Select specific frame numbers. Note: frame selection
            is done after any resampling.
        evenly_sample_total_frames (bool, optional): Reach the total number of frames specified by
            evenly sampling from the duration of the video. Defaults to False.
        pix_fmt (str, optional): ffmpeg pixel format, defaults to 'rgb24' for RGB channels; can be
            changed to 'bgr24' for BGR.
        model_input_height (int, optional): After frame selection, resize the video to this height
            in pixels.
        model_input_width (int, optional): After frame selection, resize the video to this width in
            pixels.
        cache_dir (Path, optional): Cache directory where preprocessed videos will be saved
            upon first load. Alternatively, can be set with VIDEO_CACHE_DIR environment variable.
            Defaults to None, which means videos will not be cached. Provided there is enough space
            on your machine, it is highly encouraged to cache videos for training as this will
            speed up all subsequent epochs. If you are predicting on the same videos with the
            same video loader configuration, this will save time on future runs.
        cleanup_cache (bool): Whether to delete the cache dir after training or predicting ends.
            Defaults to False.
    """

    crop_bottom_pixels: Optional[int] = None
    i_frames: Optional[bool] = False
    scene_threshold: Optional[float] = None
    megadetector_lite_config: Optional[MegadetectorLiteYoloXConfig] = None
    frame_selection_height: Optional[int] = None
    frame_selection_width: Optional[int] = None
    total_frames: Optional[int] = None
    ensure_total_frames: Optional[bool] = True
    fps: Optional[float] = None
    early_bias: Optional[bool] = False
    frame_indices: Optional[List[int]] = None
    evenly_sample_total_frames: Optional[bool] = False
    pix_fmt: Optional[str] = "rgb24"
    model_input_height: Optional[int] = None
    model_input_width: Optional[int] = None
    cache_dir: Optional[Path] = None
    cleanup_cache: bool = False

    class Config:
        # reject unknown config keys so typos fail loudly
        extra = "forbid"

    @validator("cache_dir", always=True)
    def validate_video_cache_dir(cls, cache_dir):
        """Set up cache directory for preprocessed videos. Config argument takes precedence
        over environment variable.
        """
        if cache_dir is None:
            cache_dir = os.getenv("VIDEO_CACHE_DIR", None)
        # "" and "0" explicitly disable caching
        if cache_dir in ["", "0"]:
            cache_dir = None
        if cache_dir is not None:
            cache_dir = Path(cache_dir)
            cache_dir.mkdir(parents=True, exist_ok=True)
        return cache_dir

    @root_validator(skip_on_failure=True)
    def check_height_and_width(cls, values):
        """Height/width options must be provided in pairs (or not at all)."""
        if (values["frame_selection_height"] is None) ^ (values["frame_selection_width"] is None):
            raise ValueError(
                f"Must provide both frame_selection_height and frame_selection_width or neither. Values provided are {values}."
            )
        if (values["model_input_height"] is None) ^ (values["model_input_width"] is None):
            raise ValueError(
                f"Must provide both model_input_height and model_input_width or neither. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def check_fps_compatibility(cls, values):
        """fps resampling conflicts with other frame-selection strategies."""
        if values["fps"] and (
            values["evenly_sample_total_frames"] or values["i_frames"] or values["scene_threshold"]
        ):
            raise ValueError(
                f"fps cannot be used with evenly_sample_total_frames, i_frames, or scene_threshold. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def check_i_frame_compatibility(cls, values):
        """I-frame selection and scene-change selection are mutually exclusive."""
        if values["scene_threshold"] and values["i_frames"]:
            raise ValueError(
                f"i_frames cannot be used with scene_threshold. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def check_early_bias_compatibility(cls, values):
        """early_bias fixes its own sampling, so other strategies are disallowed."""
        if values["early_bias"] and (
            values["i_frames"]
            or values["scene_threshold"]
            or values["total_frames"]
            or values["evenly_sample_total_frames"]
            or values["fps"]
        ):
            raise ValueError(
                f"early_bias cannot be used with i_frames, scene_threshold, total_frames, evenly_sample_total_frames, or fps. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def check_frame_indices_compatibility(cls, values):
        """Explicit frame_indices conflict with any automatic selection strategy."""
        if values["frame_indices"] and (
            values["total_frames"]
            or values["scene_threshold"]
            or values["i_frames"]
            or values["early_bias"]
            or values["evenly_sample_total_frames"]
        ):
            raise ValueError(
                f"frame_indices cannot be used with total_frames, scene_threshold, i_frames, early_bias, or evenly_sample_total_frames. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def check_megadetector_lite_compatibility(cls, values):
        """MegadetectorLite frame selection conflicts with fixed sampling strategies."""
        if values["megadetector_lite_config"] and (
            values["early_bias"] or values["evenly_sample_total_frames"]
        ):
            raise ValueError(
                f"megadetector_lite_config cannot be used with early_bias or evenly_sample_total_frames. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def check_evenly_sample_total_frames_compatibility(cls, values):
        """Even sampling needs a target total_frames and excludes other strategies."""
        if values["evenly_sample_total_frames"] is True and values["total_frames"] is None:
            raise ValueError(
                f"total_frames must be specified if evenly_sample_total_frames is used. Values provided are {values}."
            )
        if values["evenly_sample_total_frames"] and (
            values["scene_threshold"]
            or values["i_frames"]
            or values["fps"]
            or values["early_bias"]
        ):
            raise ValueError(
                f"evenly_sample_total_frames cannot be used with scene_threshold, i_frames, fps, or early_bias. Values provided are {values}."
            )
        return values

    @root_validator(skip_on_failure=True)
    def validate_total_frames(cls, values):
        """Keep total_frames and megadetector_lite_config.n_frames in sync when
        only one of the two is specified."""
        if values["megadetector_lite_config"] is not None:
            # set n frames for megadetector_lite_config if only specified by total_frames
            if values["megadetector_lite_config"].n_frames is None:
                values["megadetector_lite_config"].n_frames = values["total_frames"]
            # set total frames if only specified in megadetector_lite_config
            if values["total_frames"] is None:
                values["total_frames"] = values["megadetector_lite_config"].n_frames
        return values
def get_cached_array_path(vid_path, config):
    """Get the path to where the cached array would be, if it exists.
    vid_path: string path to the video, or Path
    config: VideoLoaderConfig
    returns: Path object to the cached data
    """
    assert isinstance(config, VideoLoaderConfig)
    # don't include `cleanup_cache` or `cache_dir` in the hashed config
    # NOTE: sorting the keys avoids a cache miss if we see the same config in a different order;
    # might not be necessary with a VideoLoaderConfig
    config_dict = config.dict()
    keys = config_dict.keys() - {"cleanup_cache", "cache_dir"}
    hashed_part = {k: config_dict[k] for k in sorted(keys)}
    # hash config for inclusion in path
    # NOTE: the hash is over the dict's string repr, so any change to a config
    # field value (or its repr) produces a new cache subdirectory
    hash_str = hashlib.sha1(str(hashed_part).encode("utf-8")).hexdigest()
    logger.opt(lazy=True).debug(f"Generated hash {hash_str} from {hashed_part}")
    # strip leading "/" in absolute path
    vid_path = AnyPath(str(vid_path).lstrip("/"))
    # if the video is in S3, drop the prefix and bucket name
    if isinstance(vid_path, S3Path):
        vid_path = AnyPath(vid_path.key)
    cache_dir = config.cache_dir
    # layout: <cache_dir>/<config hash>/<original relative video path>.npy
    npy_path = AnyPath(cache_dir) / hash_str / vid_path.with_suffix(".npy")
    return npy_path
class npy_cache:
    """Decorator factory that caches a video-loading function's output as ``.npy`` files.

    Args:
        cache_path (Path, optional): Root directory for cached arrays. If None,
            the wrapped function is returned unmodified (caching disabled).
        cleanup (bool): If True, delete the cache directory when this object is
            garbage collected — but only if it lives directly under the system
            temp directory.
    """

    def __init__(self, cache_path: Optional[Path] = None, cleanup: bool = False):
        self.cache_path = cache_path
        self.cleanup = cleanup

    def __call__(self, f):
        def _wrapped(*args, **kwargs):
            # the wrapped loader may be called positionally or by keyword
            try:
                vid_path = kwargs["filepath"]
            except Exception:
                vid_path = args[0]
            try:
                config = kwargs["config"]
            except Exception:
                config = VideoLoaderConfig(**kwargs)
            # NOTE: what should we do if this assert fails?
            assert config.cache_dir == self.cache_path
            # get the path for the cached data
            npy_path = get_cached_array_path(vid_path, config)
            # make parent directories since we're using absolute paths
            npy_path.parent.mkdir(parents=True, exist_ok=True)
            if npy_path.exists():
                logger.debug(f"Loading from cache {npy_path}: size {npy_path.stat().st_size}")
                return np.load(npy_path)
            else:
                logger.debug(f"Loading video from disk: {vid_path}")
                loaded_video = f(*args, **kwargs)
                np.save(npy_path, loaded_video)
                logger.debug(f"Wrote to cache {npy_path}: size {npy_path.stat().st_size}")
                return loaded_video

        if self.cache_path is not None:
            return _wrapped
        else:
            return f

    def __del__(self):
        # guard against cache_path=None with cleanup=True (would AttributeError)
        if (
            hasattr(self, "cache_path")
            and self.cache_path is not None
            and self.cleanup
            and self.cache_path.exists()
        ):
            # FIX: `tempfile.gettempdir()` returns a str; comparing a Path to a str
            # is always False, so the cleanup branch could never run. Wrap in Path.
            if self.cache_path.parents[0] == Path(tempfile.gettempdir()):
                logger.info(f"Deleting cache dir {self.cache_path}.")
                rmtree(self.cache_path)
            else:
                logger.warning(
                    "Bravely refusing to delete directory that is not a subdirectory of the "
                    "system temp directory. If you really want to delete, do so manually using:\n "
                    f"rm -r {self.cache_path}"
                )
def load_and_repeat_image(path, target_size=(224, 224), repeat_count=4):
    """
    Loads an image, resizes it, and repeats it N times.
    Args:
        path: Path to the image file.
        target_size: A tuple (w, h) representing the desired width and height of the resized image.
        repeat_count: Number of times to repeat the image.
    Returns:
        A NumPy array of shape (N, h, w, 3) representing the repeated image.
    """
    frame = cv2.imread(str(path))
    # Resize the image in same way as video frames are in `load_video_frames`:
    # INTER_LINEAR when upscaling, INTER_AREA when downscaling
    # (https://stackoverflow.com/a/51042104/1692709)
    target_width = target_size[0]
    interpolation = cv2.INTER_LINEAR if frame.shape[1] < target_width else cv2.INTER_AREA
    frame = cv2.resize(frame, target_size, interpolation=interpolation)
    # stack N copies of the frame along a new leading (time) axis
    return np.repeat(frame[np.newaxis, ...], repeat_count, axis=0)
def load_video_frames(
    filepath: os.PathLike,
    config: Optional[VideoLoaderConfig] = None,
    **kwargs,
):
    """Loads frames from videos using fast ffmpeg commands.
    Supports images as well, but it is inefficient since we just replicate the frames.

    NOTE: several branches below mutate `config` in place (fps, total_frames,
    frame_indices), so the caller's config object reflects derived values afterwards.

    Args:
        filepath (os.PathLike): Path to the video.
        config (VideoLoaderConfig, optional): Configuration for video loading.
        **kwargs: Optionally, arguments for VideoLoaderConfig can be passed in directly.
    Returns:
        np.ndarray: An array of video frames with dimensions (time x height x width x channels).
    Raises:
        FileNotFoundError: If `filepath` does not exist.
        ZambaFfmpegException: If the ffmpeg pipeline fails.
    """
    if not Path(filepath).exists():
        raise FileNotFoundError(f"No file found at {filepath}")
    if config is None:
        config = VideoLoaderConfig(**kwargs)
    # images are handled by resizing a single frame and repeating it total_frames times
    if Path(filepath).suffix.lower() in IMAGE_SUFFIXES:
        return load_and_repeat_image(
            filepath,
            target_size=(config.model_input_width, config.model_input_height),
            repeat_count=config.total_frames,
        )
    video_stream = get_video_stream(filepath)
    w = int(video_stream["width"])
    h = int(video_stream["height"])
    pipeline = ffmpeg.input(str(filepath))
    pipeline_kwargs = {}
    if (config.crop_bottom_pixels is not None) and (config.crop_bottom_pixels > 0):
        # scale to ensure all frames are the same height and we can crop
        pipeline = pipeline.filter("scale", f"{w},{h}")
        pipeline = pipeline.crop("0", "0", "iw", f"ih-{config.crop_bottom_pixels}")
        h = h - config.crop_bottom_pixels
    if config.evenly_sample_total_frames:
        # derive an fps that spreads total_frames evenly over the video duration
        config.fps = config.total_frames / float(video_stream["duration"])
    if config.early_bias:
        config.fps = 24  # competition frame selection assumes 24 frames per second
        config.total_frames = 16  # used for ensure_total_frames
    if config.fps:
        pipeline = pipeline.filter("fps", fps=config.fps, round="up")
    if config.i_frames:
        # keep only keyframes
        pipeline = pipeline.filter("select", "eq(pict_type,PICT_TYPE_I)")
    if config.scene_threshold:
        # keep only frames whose scene-change score exceeds the threshold
        pipeline = pipeline.filter("select", f"gt(scene,{config.scene_threshold})")
    if config.frame_selection_height and config.frame_selection_width:
        pipeline = pipeline.filter(
            "scale", f"{config.frame_selection_width},{config.frame_selection_height}"
        )
        w, h = config.frame_selection_width, config.frame_selection_height
    if config.early_bias:
        # fixed early-biased frame indices (at the 24 fps set above)
        config.frame_indices = [2, 8, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156]
    if config.frame_indices:
        pipeline = pipeline.filter("select", "+".join(f"eq(n,{f})" for f in config.frame_indices))
        pipeline_kwargs = {"vsync": 0}
    pipeline = pipeline.output(
        "pipe:", format="rawvideo", pix_fmt=config.pix_fmt, **pipeline_kwargs
    )
    try:
        out, err = pipeline.run(capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as exc:
        raise ZambaFfmpegException(exc.stderr)
    # raw rgb24 bytes -> (time, height, width, channels)
    arr = np.frombuffer(out, np.uint8).reshape([-1, h, w, 3])
    if config.megadetector_lite_config is not None:
        # score frames with MegadetectorLite and keep the most promising ones
        mdlite = MegadetectorLiteYoloX(config=config.megadetector_lite_config)
        detection_probs = mdlite.detect_video(video_arr=arr)
        arr = mdlite.filter_frames(arr, detection_probs)
    if (config.model_input_height is not None) and (config.model_input_width is not None):
        resized_frames = np.zeros(
            (arr.shape[0], config.model_input_height, config.model_input_width, 3), np.uint8
        )
        for ix, f in enumerate(arr):
            if (f.shape[0] != config.model_input_height) or (
                f.shape[1] != config.model_input_width
            ):
                f = cv2.resize(
                    f,
                    (config.model_input_width, config.model_input_height),
                    # https://stackoverflow.com/a/51042104/1692709
                    interpolation=(
                        cv2.INTER_LINEAR
                        if f.shape[1] < config.model_input_width
                        else cv2.INTER_AREA
                    ),
                )
            resized_frames[ix, ...] = f
        arr = np.array(resized_frames)
    if config.ensure_total_frames:
        # pad or truncate so the output always has exactly total_frames frames
        arr = ensure_frame_number(arr, total_frames=config.total_frames)
    return arr
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/result.py | zamba/images/result.py | from typing import Any
import pandas as pd
from PIL import Image
from pydantic import BaseModel
class ImageDetectionResult(BaseModel):
    """One MegaDetector detection with its per-species classification scores."""

    category: str
    conf: float
    # MegaDetector bbox is relative measures from top left [x1, y1, width, height]
    bbox: list[float]
    classifications: list[list]
class ImageResult(BaseModel):
    """All detections found in a single image file."""
    file: str
    detections: list[ImageDetectionResult]
class ClassificationResultMegadetectorFormat(BaseModel):
    """Top-level MegaDetector-style JSON payload for classification results."""
    info: dict[str, Any]
    detection_categories: dict[str, str]
    classification_categories: dict[str, str]
    images: list[ImageResult]
def get_top_3_classifications(row, species, k: int = 3) -> list:
    """Return the top-``k`` species scores for one prediction row.

    Args:
        row (pd.Series): A row of predictions containing one entry per species.
        species (list): Species column names; list position defines the class index.
        k (int): Number of top classifications to return. Defaults to 3 to match
            the MegaDetector classification output format.

    Returns:
        list: ``[[species_index, score], ...]`` sorted by descending score.
    """
    # coerce to numeric so non-numeric columns/values become NaN and sort last
    scores = pd.to_numeric(row[species], errors="coerce")
    return [[species.index(col), value] for col, value in scores.nlargest(k).items()]
def results_to_megadetector_format(
    df: pd.DataFrame, species: list
) -> ClassificationResultMegadetectorFormat:
    """Convert a flat predictions dataframe into MegaDetector-style result models.

    Bounding boxes are converted from absolute pixel coordinates to the relative
    [x1, y1, width, height] layout MegaDetector uses, which requires opening each
    image to read its dimensions.
    """
    detection_categories = {"1": "animal", "2": "person", "3": "vehicle"}
    classification_categories = dict(enumerate(species))
    info = {}
    per_file = {}
    for _, row in df.iterrows():
        filepath = row["filepath"]
        with Image.open(filepath) as img:
            width, height = img.size
        detection_category = row["detection_category"]
        if per_file.get(filepath) is None:
            per_file[filepath] = ImageResult(file=filepath, detections=[])
        # only animal detections (category "1") carry species classifications
        if detection_category == "1":
            detection_classifications = get_top_3_classifications(row, species)
        else:
            detection_classifications = []
        relative_bbox = [
            row["x1"] / width,
            row["y1"] / height,
            (row["x2"] - row["x1"]) / width,
            (row["y2"] - row["y1"]) / height,
        ]  # MegaDetector bbox is relative measures from top left [x1, y1, width, height]
        per_file[filepath].detections.append(
            ImageDetectionResult(
                category=detection_category,
                conf=row["detection_conf"],
                bbox=relative_bbox,
                classifications=detection_classifications,
            )
        )
    return ClassificationResultMegadetectorFormat(
        info=info,
        detection_categories=detection_categories,
        classification_categories=classification_categories,
        images=list(per_file.values()),
    )
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/config.py | zamba/images/config.py | import json
import os
from enum import StrEnum
from pathlib import Path
from typing import Any, Dict, Optional, Union
import appdirs
import pandas as pd
import torch
from loguru import logger
from pydantic import DirectoryPath, FilePath, root_validator, validator
from tqdm.contrib.concurrent import process_map
from zamba.images.bbox import BboxInputFormat, bbox_json_to_df
from zamba.models.config import (
SchedulerConfig,
ZambaBaseModel,
get_filepaths,
make_split,
validate_model_cache_dir,
validate_model_name_and_checkpoint,
)
from zamba.models.utils import RegionEnum
from zamba.settings import IMAGE_SUFFIXES
GPUS_AVAILABLE = torch.cuda.device_count()
class ResultsFormat(StrEnum):
    """Supported output formats for image classification predictions."""
    CSV = "csv"
    MEGADETECTOR = "megadetector"
class ImageModelEnum(StrEnum):
    """Names of the pretrained image classification models available in zamba."""
    LILA_SCIENCE = "lila.science"
class ZambaImageConfig(ZambaBaseModel):
    """Base config for image prediction/training; defaults `save_dir` to the cwd."""
    # directory for outputs; filled in by validate_save when not provided
    save_dir: Optional[Path] = None
    @root_validator(skip_on_failure=True)
    def validate_save(cls, values):
        """Fall back to the current working directory when no save_dir is given."""
        save_dir = values["save_dir"]
        if save_dir is None:
            save_dir = Path.cwd()
        values["save_dir"] = save_dir
        return values
    def get_image_filepaths(cls, values):
        # shared helper that subclasses wire up via root_validator; restricts
        # get_filepaths to image suffixes
        return get_filepaths(values, IMAGE_SUFFIXES)
class ImageClassificationPredictConfig(ZambaImageConfig):
    """
    Configuration for using an image classification model for inference.
    Args:
        data_dir (DirectoryPath): Path to a directory containing images for
            inference. Defaults to the current working directory.
        filepaths (FilePath, optional): Path to a CSV containing images for inference, with
            one row per image in the data_dir. There must be a column called
            'filepath' (absolute or relative to the data_dir). If None, uses
            all image files in data_dir. Defaults to None.
        checkpoint (FilePath, optional): Path to a custom checkpoint file (.ckpt)
            generated by zamba that can be used to generate predictions. If None,
            defaults to a pretrained model. Defaults to None.
        model_name (str, optional): Name of the model to use for inference. Currently
            supports 'lila.science'. Defaults to 'lila.science'.
        save (bool): Whether to save out predictions. If False, predictions are
            not saved. Defaults to True.
        save_dir (Path, optional): An optional directory in which to save the model
            predictions and configuration yaml. If no save_dir is specified and save=True,
            outputs will be written to the current working directory. Defaults to None.
        overwrite (bool): If True, overwrite outputs in save_dir if they exist.
            Defaults to False.
        crop_images (bool): Whether to preprocess images using Megadetector or bounding boxes
            from labels file. Focuses the model on regions of interest. Defaults to True.
        detections_threshold (float): Confidence threshold for Megadetector detections.
            Only applied when crop_images=True. Defaults to 0.2.
        gpus (int): Number of GPUs to use for inference.
            Defaults to all of the available GPUs found on the machine.
        num_workers (int): Number of subprocesses to use for data loading.
            Defaults to 3.
        image_size (int, optional): Image size (height and width) for the input to the
            classification model. Defaults to 224.
        results_file_format (ResultsFormat): The format in which to output the predictions.
            Currently 'csv' and 'megadetector' JSON formats are supported. Default is 'csv'.
        results_file_name (Path, optional): The filename for the output predictions in the
            save directory. Defaults to 'zamba_predictions.csv' or 'zamba_predictions.json'
            depending on results_file_format.
        model_cache_dir (Path, optional): Cache directory where downloaded model weights
            will be saved. If None and no environment variable is set, will use your
            default cache directory. Defaults to None.
        weight_download_region (str): s3 region to download pretrained weights from.
            Options are "us" (United States), "eu" (Europe), or "asia" (Asia Pacific).
            Defaults to "us".
    """

    checkpoint: Optional[FilePath] = None
    model_name: Optional[str] = ImageModelEnum.LILA_SCIENCE.value
    filepaths: Optional[Union[FilePath, pd.DataFrame]] = None
    data_dir: DirectoryPath
    save: bool = True
    overwrite: bool = False
    crop_images: bool = True
    detections_threshold: float = 0.2
    gpus: int = GPUS_AVAILABLE
    num_workers: int = 3
    image_size: Optional[int] = 224
    results_file_format: ResultsFormat = ResultsFormat.CSV
    results_file_name: Optional[Path] = Path("zamba_predictions.csv")
    model_cache_dir: Optional[Path] = None
    weight_download_region: str = RegionEnum.us.value

    class Config:  # type: ignore
        arbitrary_types_allowed = True

    _validate_model_cache_dir = validator("model_cache_dir", allow_reuse=True, always=True)(
        validate_model_cache_dir
    )
    _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(
        get_image_filepaths
    )

    @root_validator(skip_on_failure=True)
    def validate_save_dir(cls, values):
        """Resolve save_dir, refuse to clobber existing outputs unless overwrite=True."""
        save_dir = values["save_dir"]
        results_file_name = values["results_file_name"]
        save = values["save"]
        # if no save_dir but save is True, use current working directory
        if save_dir is None and save:
            save_dir = Path.cwd()
        if save_dir is not None:
            # check if files exist
            save_path = save_dir / results_file_name
            if values["results_file_format"] == ResultsFormat.MEGADETECTOR:
                save_path = save_path.with_suffix(".json")
            if save_path.exists() and not values["overwrite"]:
                raise ValueError(
                    f"{save_path.name} already exists in {save_dir}. If you would like to overwrite, set overwrite=True"
                )
            # make a directory if needed
            save_dir.mkdir(parents=True, exist_ok=True)
            # set save to True if save_dir is set
            if not save:
                save = True
        values["save_dir"] = save_dir
        values["save"] = save
        return values

    @root_validator(skip_on_failure=True)
    def validate_filepaths(cls, values):
        """Load the filepaths csv, drop duplicates, and resolve paths to absolute."""
        if isinstance(values["filepaths"], pd.DataFrame):
            files_df = values["filepaths"]
        else:
            files_df = pd.DataFrame(pd.read_csv(values["filepaths"]))
        if "filepath" not in files_df.columns:
            # FIX: this previously interpolated values["filepath"], a key that does
            # not exist, so the error path raised KeyError instead of this message
            raise ValueError(f"{values['filepaths']} must contain a `filepath` column.")
        else:
            files_df = files_df[["filepath"]]
        duplicated = files_df.filepath.duplicated()
        if duplicated.sum() > 0:
            logger.warning(
                f"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video."
            )
            files_df = files_df[["filepath"]].drop_duplicates()
        # The filepath column can come in as a str or a Path-like and either absolute
        # or relative to the data directory. Handle all those cases.
        filepaths = []
        for path in files_df["filepath"]:
            path = Path(path)
            if not path.is_absolute():
                # Assume relative to data directory
                path = values["data_dir"] / path
            filepaths.append(str(path))
        files_df["filepath"] = filepaths
        values["filepaths"] = files_df
        return values

    @root_validator(skip_on_failure=True)
    def validate_detections_threshold(cls, values):
        """Require the Megadetector confidence threshold to be in the open (0, 1) range."""
        threshold = values["detections_threshold"]
        if threshold <= 0 or threshold >= 1:
            raise ValueError(
                "Detections threshold value should be greater than zero and less than one."
            )
        return values

    @root_validator(skip_on_failure=True)
    def validate_image_size(cls, values):
        """Require a positive model input image size."""
        if values["image_size"] <= 0:
            raise ValueError("Image size should be greater than or equal 64")
        return values

    _validate_model_name_and_checkpoint = root_validator(allow_reuse=True, skip_on_failure=True)(
        validate_model_name_and_checkpoint
    )
class ImageClassificationTrainingConfig(ZambaImageConfig):
    """Configuration for running image classification training.
    Args:
        data_dir (Path): Path to directory containing the training images.
        labels (Union[FilePath, pd.DataFrame]): Path to a CSV or JSON file with labels, or a pandas DataFrame.
            For CSV files, must contain 'filepath' and 'label' columns.
            For JSON files, must be in COCO or other supported format as specified by labels_format.
        labels_format (BboxFormat): Format for bounding box annotations when labels are provided as JSON.
            Options are defined in the BboxFormat enum. Defaults to BboxFormat.COCO.
        checkpoint (FilePath, optional): Path to a custom checkpoint file (.ckpt) generated by zamba
            that can be used to resume training. If None and from_scratch=False, will load a pretrained model.
            Defaults to None.
        model_name (str, optional): Name of the model to use. Currently supports 'lila.science'.
            Defaults to 'lila.science'.
        name (str, optional): Classification experiment name used for MLFlow tracking.
            Defaults to 'image-classification'.
        max_epochs (int): Maximum number of training epochs. Defaults to 100.
        lr (float, optional): Learning rate. If None, will attempt to find a good learning rate.
            Defaults to None.
        image_size (int): Input image size (height and width) for the model. Defaults to 224.
        batch_size (int, optional): Physical batch size for training. Defaults to 16.
        accumulated_batch_size (int, optional): Virtual batch size for gradient accumulation.
            Useful to match batch sizes from published papers with constrained GPU memory.
            If None, uses batch_size. Defaults to None.
        early_stopping_patience (int): Number of epochs with no improvement after which training
            will be stopped. Defaults to 3.
        extra_train_augmentations (bool): Whether to use additional image augmentations.
            If false, uses simple transforms for camera trap imagery (random perspective shift,
            random horizontal flip, random rotation).
            If True, adds complex transforms beyond the basic set (random grayscale, equalize, etc.).
            Defaults to False.
        num_workers (int): Number of workers for data loading. Defaults to 2/3 of available CPU cores.
        accelerator (str): PyTorch Lightning accelerator type ('gpu' or 'cpu').
            Defaults to 'gpu' if CUDA is available, otherwise 'cpu'.
        devices (Any): Which devices to use for training. Can be int, list of ints, or 'auto'.
            Defaults to 'auto'.
        crop_images (bool): Whether to preprocess images using Megadetector or bounding boxes
            from labels. Defaults to True.
        detections_threshold (float): Confidence threshold for Megadetector.
            Only used when crop_images=True and no bounding boxes are provided in labels.
            Defaults to 0.2.
        checkpoint_path (Path): Directory where training outputs will be saved.
            Defaults to current working directory.
        weighted_loss (bool): Whether to use class-weighted loss during training.
            Helpful for imbalanced datasets. Defaults to False.
        mlflow_tracking_uri (str, optional): URI for MLFlow tracking server.
            Defaults to './mlruns'.
        from_scratch (bool): Whether to train the model from scratch (base weights)
            instead of using a pretrained checkpoint. Defaults to False.
        use_default_model_labels (bool, optional): Whether to use the full set of default model
            labels or only the labels in the provided dataset.
            If set to False, will replace the model head for finetuning and output only
            the species in the provided labels file.
            If None, automatically determined based on the labels provided.
        scheduler_config (Union[str, SchedulerConfig], optional): Learning rate scheduler
            configuration. If 'default', uses the scheduler from original training.
            Defaults to 'default'.
        split_proportions (Dict[str, int], optional): Proportions for train/val/test splits
            if no split column is provided in labels. Defaults to {'train': 3, 'val': 1, 'test': 1}.
        model_cache_dir (Path, optional): Directory where downloaded model weights will be cached.
            If None, uses the system's default cache directory. Defaults to None.
        cache_dir (Path, optional): Directory where cropped/processed images will be cached.
            Defaults to a 'image_cache' subdirectory in the system's cache directory.
        weight_download_region (str): S3 region for downloading pretrained weights.
            Options are 'us', 'eu', or 'asia'. Defaults to 'us'.
        species_in_label_order (list, optional): Optional list to specify the order of
            species labels in the model output. Defaults to None.
    """

    data_dir: Path
    labels: Union[FilePath, pd.DataFrame]
    labels_format: BboxInputFormat = BboxInputFormat.COCO
    checkpoint: Optional[FilePath] = None
    model_name: Optional[str] = ImageModelEnum.LILA_SCIENCE.value
    name: Optional[str] = "image-classification"
    max_epochs: int = 100
    lr: Optional[float] = None  # if None, will find a good learning rate
    image_size: int = 224
    batch_size: Optional[int] = 16
    accumulated_batch_size: Optional[int] = None
    early_stopping_patience: int = 3
    extra_train_augmentations: bool = False
    num_workers: int = int(os.cpu_count() // 1.5)  # default use 2/3 of available cores
    accelerator: str = "gpu" if torch.cuda.is_available() else "cpu"
    devices: Any = "auto"
    crop_images: bool = True
    detections_threshold: float = 0.2
    checkpoint_path: Path = Path.cwd()
    weighted_loss: bool = False
    mlflow_tracking_uri: Optional[str] = "./mlruns"
    from_scratch: Optional[bool] = False
    use_default_model_labels: Optional[bool] = None
    scheduler_config: Optional[Union[str, SchedulerConfig]] = "default"
    split_proportions: Optional[Dict[str, int]] = {"train": 3, "val": 1, "test": 1}
    model_cache_dir: Optional[Path] = None
    cache_dir: Optional[Path] = Path(appdirs.user_cache_dir()) / "zamba" / "image_cache"
    weight_download_region: str = RegionEnum.us.value
    species_in_label_order: Optional[list] = None

    class Config:
        arbitrary_types_allowed = True

    _validate_model_cache_dir = validator("model_cache_dir", allow_reuse=True, always=True)(
        validate_model_cache_dir
    )

    @staticmethod
    def process_json_annotations(labels, labels_format: BboxInputFormat) -> pd.DataFrame:
        """Convert JSON bounding-box annotations into a labels dataframe."""
        return bbox_json_to_df(labels, bbox_format=labels_format)

    @root_validator(skip_on_failure=True)
    def process_cache_dir(cls, values):
        """Create the image cache directory if it does not exist yet."""
        cache_dir = values["cache_dir"]
        if not cache_dir.exists():
            cache_dir.mkdir(parents=True)
            logger.info("Cache dir created.")
        return values

    @root_validator(skip_on_failure=True)
    def validate_labels(cls, values):
        """Validate and load labels"""
        logger.info("Validating labels")
        if isinstance(values["labels"], pd.DataFrame):
            pass
        elif values["labels"].suffix == ".json":
            with open(values["labels"], "r") as f:
                values["labels"] = cls.process_json_annotations(
                    json.load(f), values["labels_format"]
                )
        else:
            values["labels"] = pd.read_csv(values["labels"])
        return values

    @root_validator(skip_on_failure=True)
    def validate_devices(cls, values):
        # per pytorch lightning docs, should be int or list of ints
        # https://lightning.ai/docs/pytorch/stable/common/trainer.html#devices
        raw_val = values["devices"]
        if raw_val == "auto":
            pass
        elif isinstance(raw_val, str):
            if "," in raw_val:
                # FIX: previously iterated the characters of the string (including
                # the commas), so "0,1" raised ValueError on int(","); split first
                values["devices"] = [int(v) for v in raw_val.split(",")]
            else:
                values["devices"] = int(raw_val)
        elif isinstance(raw_val, (list, tuple)):
            # FIX: a list of device ids previously fell through to int(raw_val)
            # and raised TypeError; normalize each entry instead
            values["devices"] = [int(v) for v in raw_val]
        else:
            # a bare int (or int-like) device count
            values["devices"] = int(raw_val)
        return values

    @root_validator(skip_on_failure=True)
    def validate_data_dir(cls, values):
        """Require that the data directory exists."""
        if not os.path.exists(values["data_dir"]):
            raise ValueError("Data dir doesn't exist.")
        return values

    @root_validator(skip_on_failure=True)
    def validate_image_files(cls, values):
        """Validate and load image files."""
        logger.info("Validating image files exist")
        # NOTE(review): `.filepath.path` appears to rely on a pandas path accessor
        # registered elsewhere (e.g. the pandas-path package) — confirm against imports
        exists = process_map(
            cls._validate_filepath,
            (values["data_dir"] / values["labels"].filepath.path).items(),
            chunksize=max(
                1, len(values["labels"]) // 1000
            ),  # chunks can be large; should be fast operation
            total=len(values["labels"]),
        )
        file_existence = pd.DataFrame(exists).set_index(0)
        exists = file_existence[2]
        if not exists.all():
            missing_files = file_existence[~exists]
            example_missing = [str(f) for f in missing_files.head(3)[1].values]
            logger.warning(
                f"{(~exists).sum()} files in provided labels file do not exist on disk; ignoring those files. Example: {example_missing}..."
            )
            values["labels"] = values["labels"][exists]
        return values

    @root_validator(skip_on_failure=True)
    def preprocess_labels(cls, values):
        """One hot encode and add splits."""
        logger.info("Preprocessing labels.")
        labels = values["labels"]
        # lowercase to facilitate subset checking
        labels["label"] = labels.label.str.lower()
        # one hot encoding
        labels = pd.get_dummies(labels.rename(columns={"label": "species"}), columns=["species"])
        # We validate that all the images exist prior to this, so once this assembles the set of classes,
        # we should have at least one example of each label and don't need to worry about filtering out classes
        # with missing examples.
        species_columns = labels.columns[labels.columns.str.contains("species_")]
        values["species_in_label_order"] = species_columns.to_list()
        # integer class index = position of the hot species column
        indices = (
            labels[species_columns].idxmax(axis=1).apply(lambda x: species_columns.get_loc(x))
        )
        labels["label"] = indices
        # if no "split" column, set up train, val, and test split
        if "split" not in labels.columns:
            make_split(labels, values)
        values["labels"] = labels.reset_index()
        example_species = [
            species.replace("species_", "") for species in values["species_in_label_order"][:3]
        ]
        logger.info(
            f"Labels preprocessed. {len(values['species_in_label_order'])} species found: {example_species}..."
        )
        return values

    _validate_model_name_and_checkpoint = root_validator(allow_reuse=True, skip_on_failure=True)(
        validate_model_name_and_checkpoint
    )

    @root_validator(skip_on_failure=True)
    def validate_from_scratch(cls, values):
        """Require a checkpoint unless explicitly training from scratch."""
        from_scratch = values["from_scratch"]
        model_checkpoint = values["checkpoint"]
        # FIX: the condition previously tested `from_scratch is False` twice, so
        # from_scratch=None slipped through without a checkpoint
        if (from_scratch is False or from_scratch is None) and model_checkpoint is None:
            raise ValueError(
                "You must specify checkpoint if you don't want to start training from scratch."
            )
        return values

    @staticmethod
    def _validate_filepath(ix_path):
        # helper for process_map: returns (index, path, exists-and-nonempty)
        ix, path = ix_path
        path = Path(path)
        return ix, path, path.exists() and path.stat().st_size > 0
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/__init__.py | zamba/images/__init__.py | from .classifier import ImageClassifierModule # noqa: F401
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/evaluate.py | zamba/images/evaluate.py | import io
from typing import Dict, List, Optional
import numpy as np
from PIL import Image
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
f1_score,
precision_score,
recall_score,
top_k_accuracy_score,
)
from zamba.metrics import compute_species_specific_metrics
class ClassificationEvaluator:
    """Computes aggregate and per-species classification metrics.

    Args:
        labels (List[str]): Class names, in model output-column order.
    """

    def __init__(
        self,
        labels: List[str],
    ):
        self.labels = labels
        self.num_classes = len(self.labels)

    def get_metrics(self, y_true, y_pred) -> Dict[str, float]:
        """Return top-k accuracies merged with overall precision/recall/F1 metrics."""
        return {**self.top_k_accuracy_data(y_true, y_pred), **self.overall_metrics(y_true, y_pred)}

    def species_score_metrics(self, y_true, y_pred) -> Dict[str, float]:
        """Return one `{metric}_{label}` entry per species-specific metric."""
        species_metrics = {}
        for metric, label, value in compute_species_specific_metrics(y_true, y_pred, self.labels):
            species_metrics[f"{metric}_{label}"] = value
        return species_metrics

    def confusion_matrix_data(self, y_true, y_pred):
        """Row-normalized confusion matrix over all known classes."""
        return confusion_matrix(
            y_true.argmax(axis=1),
            y_pred.argmax(axis=1),
            labels=list(range(self.num_classes)),
            normalize="true",
        )

    def confusion_matrix_plot(self, y_true, y_pred) -> Optional[Image.Image]:
        """Render the normalized confusion matrix as a PIL image.

        Returns None if matplotlib is not installed.
        """
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            return None
        cm = self.confusion_matrix_data(y_true, y_pred)
        # scale figure with class count, clamped to a readable range
        size = min(max(12, int(1.5 * self.num_classes)), 30)
        fig, ax = plt.subplots(figsize=(size, size), dpi=150)
        cax = ax.matshow(cm, cmap=plt.cm.Blues)
        plt.colorbar(cax)
        ax.set_xticks(np.arange(self.num_classes))
        ax.set_yticks(np.arange(self.num_classes))
        ax.set_xticklabels(self.labels)
        ax.set_yticklabels(self.labels)
        plt.xticks(rotation=45, ha="left")
        for i in range(self.num_classes):
            for j in range(self.num_classes):
                ax.text(j, i, f"{cm[i, j]:.2f}", ha="center", va="center", color="black")
        plt.xlabel("Predicted")
        plt.ylabel("True")
        plt.title("Normalized Confusion Matrix")
        buf = io.BytesIO()
        fig.savefig(buf)
        buf.seek(0)
        image = Image.open(buf)
        # FIX: close the figure so repeated calls don't accumulate open
        # matplotlib figures (a memory leak); the PIL image keeps its own buffer
        plt.close(fig)
        return image

    @staticmethod
    def overall_metrics(y_true, y_pred) -> Dict[str, float]:
        """Accuracy plus macro- and weighted-averaged precision/recall/F1.

        `zero_division=False` is numerically 0, so undefined scores become 0.
        """
        y_pred_labels = np.argmax(y_pred, axis=1)
        y_test_labels = np.argmax(y_true, axis=1)
        return {
            "accuracy": accuracy_score(y_test_labels, y_pred_labels),
            "recall": recall_score(
                y_test_labels, y_pred_labels, average="macro", zero_division=False
            ),
            "precision": precision_score(
                y_test_labels, y_pred_labels, average="macro", zero_division=False
            ),
            "f1": f1_score(y_test_labels, y_pred_labels, average="macro", zero_division=False),
            "weighted_recall": recall_score(
                y_test_labels, y_pred_labels, average="weighted", zero_division=False
            ),
            "weighted_precision": precision_score(
                y_test_labels, y_pred_labels, average="weighted", zero_division=False
            ),
            "weighted_f1": f1_score(
                y_test_labels, y_pred_labels, average="weighted", zero_division=False
            ),
        }

    def top_k_accuracy_data(self, y_true, y_pred, ks: Optional[List[int]] = None):
        """Top-k accuracy for each k in `ks` (defaults to [1, 3, 5, 10])."""
        if ks is None:
            ks = [1, 3, 5, 10]
        k_scores = {
            f"top_{k}_accuracy": top_k_accuracy_score(
                y_true.argmax(axis=1), y_pred, k=k, labels=list(range(self.num_classes))
            )
            for k in ks
        }
        return k_scores
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/manager.py | zamba/images/manager.py | from collections.abc import Iterable
from datetime import datetime, timezone
import json
import os
from pathlib import Path
import random
from functools import partial
import sys
import git
import mlflow
import pandas as pd
import pytorch_lightning as pl
import torch
from loguru import logger
from megadetector.detection import run_detector
from pytorch_lightning.callbacks import (
EarlyStopping,
ModelCheckpoint,
StochasticWeightAveraging,
)
from pytorch_lightning.loggers import MLFlowLogger
from pytorch_lightning.tuner.tuning import Tuner
from sklearn.utils.class_weight import compute_class_weight
from torch.nn import ModuleList
from torchvision.transforms import transforms
from tqdm import tqdm
import yaml
from zamba.images.classifier import ImageClassifierModule
from zamba.images.config import (
ImageClassificationPredictConfig,
ImageClassificationTrainingConfig,
ResultsFormat,
)
from zamba.images.data import ImageClassificationDataModule, load_image, absolute_bbox, BboxLayout
from zamba.images.result import results_to_megadetector_format
from zamba.models.model_manager import instantiate_model
from zamba.pytorch.transforms import resize_and_pad
def get_weights(split):
    """Compute balanced per-class loss weights from one-hot ``species_*`` columns.

    Returns a float32 tensor of sklearn "balanced" class weights, ordered by the
    species columns of ``split``.
    """
    species_columns = split.filter(like="species_")
    flat_labels = pd.from_dummies(species_columns).values.flatten()
    balanced = compute_class_weight(
        "balanced", classes=species_columns.columns.values, y=flat_labels
    )
    return torch.tensor(balanced).to(torch.float32)
def predict(config: ImageClassificationPredictConfig) -> None:
    """Run MegaDetector + the zamba image classifier over ``config.filepaths``.

    MegaDetector proposes bounding boxes per image; each detection above
    ``config.detections_threshold`` is cropped, normalized, and classified.
    Results are optionally saved as CSV or MegaDetector-format JSON.
    """
    # Inference-time preprocessing: letterbox to a square, then normalize with
    # the same mean/std used at training time.
    image_transforms = transforms.Compose(
        [
            transforms.Lambda(partial(resize_and_pad, desired_size=config.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225]),
        ]
    )
    logger.info("Loading models")
    # NOTE(review): RUNNER_OS is a CI env var; presumably the detector is forced
    # onto CPU on macOS runners because GPU/MPS is unsupported there — confirm.
    detector = run_detector.load_detector("MDV5A", force_cpu=(os.getenv("RUNNER_OS") == "macOS"))
    classifier_module = instantiate_model(
        checkpoint=config.checkpoint,
    )
    logger.info("Running inference")
    predictions = []
    assert isinstance(config.filepaths, pd.DataFrame)
    for filepath in tqdm(config.filepaths["filepath"]):
        image = load_image(filepath)
        results = detector.generate_detections_one_image(
            image, filepath, detection_threshold=config.detections_threshold
        )
        for detection in results["detections"]:
            try:
                # MegaDetector emits relative [x, y, w, h]; convert to absolute x1y1x2y2.
                bbox = absolute_bbox(image, detection["bbox"], bbox_layout=BboxLayout.XYWH)
                detection_category = detection["category"]
                detection_conf = detection["conf"]
                img = image.crop(bbox)
                input_data = image_transforms(img)
                with torch.no_grad():
                    y_hat = (
                        torch.softmax(classifier_module(input_data.unsqueeze(0)), dim=1)
                        .squeeze(0)
                        .numpy()
                    )
                predictions.append((filepath, detection_category, detection_conf, bbox, y_hat))
            except Exception as e:
                # Best-effort: a failed crop/classification skips this detection,
                # not the whole run.
                logger.exception(e)
                continue
    if config.save:
        df = pd.DataFrame(
            predictions,
            columns=["filepath", "detection_category", "detection_conf", "bbox", "result"],
        )
        # Split bbox into separate columns x1, y1, x2, y2
        df[["x1", "y1", "x2", "y2"]] = pd.DataFrame(df["bbox"].tolist(), index=df.index)
        # Split result into separate columns for each class using "species" from classifier module
        species_df = pd.DataFrame(df["result"].tolist(), index=df.index)
        species_df.columns = classifier_module.species
        df = pd.concat([df, species_df], axis=1)
        # Drop the original 'bbox' and 'result' columns
        df = df.drop(columns=["bbox", "result"])
        save_path = config.save_dir / config.results_file_name
        logger.info("Saving results")
        if config.results_file_format == ResultsFormat.CSV:
            save_path = save_path.with_suffix(".csv")
            df.to_csv(save_path, index=False)
        elif config.results_file_format == ResultsFormat.MEGADETECTOR:
            megadetector_format_results = results_to_megadetector_format(
                df, classifier_module.species
            )
            save_path = save_path.with_suffix(".json")
            with open(save_path, "w") as f:
                json.dump(megadetector_format_results.dict(), f)
        logger.info(f"Results saved to {save_path}")
def _save_metrics(
    data: pl.LightningDataModule, trainer: pl.Trainer, model: pl.LightningModule, save_dir: Path
):
    """Evaluate on the holdout and validation loaders, writing metrics JSON files."""
    holdout_loader = data.test_dataloader()
    if holdout_loader is not None and len(holdout_loader) > 0:
        logger.info("Calculating metrics on holdout set.")
        metrics = trainer.test(model, dataloaders=holdout_loader)
        with (save_dir / "test_metrics.json").open("w") as fp:
            json.dump(metrics[0], fp, indent=2)
    validation_loader = data.val_dataloader()
    if validation_loader is not None and len(validation_loader) > 0:
        logger.info("Calculating metrics on validation set.")
        metrics = trainer.validate(model, dataloaders=validation_loader)
        with (save_dir / "val_metrics.json").open("w") as fp:
            json.dump(metrics[0], fp, indent=2)
def _save_config(model, config):
    """Write the full training configuration (plus git hash) to save_dir as YAML."""
    try:
        repo = git.Repo(search_parent_directories=True)
        git_hash = repo.head.object.hexsha
    except git.exc.InvalidGitRepositoryError:
        # Not running from a git checkout; record no hash.
        git_hash = None
    configuration = {
        "git_hash": git_hash,
        "model_class": model.model_class,
        "species": model.species,
        "starting_learning_rate": model.hparams.lr,
        "train_config": json.loads(config.json(exclude={"labels"})),
        "training_start_time": datetime.now(timezone.utc).isoformat(),
    }
    config_path = config.save_dir / "train_configuration.yaml"
    config_path.parent.mkdir(exist_ok=True, parents=True)
    logger.info(f"Writing out full configuration to {config_path}.")
    with config_path.open("w") as outfile:
        yaml.dump(configuration, outfile)
def train(config: ImageClassificationTrainingConfig) -> pl.Trainer:
    """Train an image classifier per ``config`` and return the fitted trainer.

    Sets up augmentations, the data module, the (optionally compiled) model,
    batch-size/learning-rate tuning, and (possibly distributed) training; writes
    metrics, the training configuration, and the best checkpoint to
    ``config.save_dir``.
    """
    if config.save_dir:
        logger.add(
            str(config.save_dir / "training.log"),
            level="INFO",
            format="{time} - {name} - {level} - {message}",
        )
    if config.extra_train_augmentations:
        train_transforms = transforms.Compose(
            [
                transforms.Lambda(partial(resize_and_pad, desired_size=config.image_size)),
                transforms.RandomResizedCrop(
                    size=(config.image_size, config.image_size), scale=(0.75, 1.0)
                ),
                transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
                transforms.RandomHorizontalFlip(p=0.3),
                transforms.RandomApply(ModuleList([transforms.RandomRotation((-22, 22))]), p=0.2),
                transforms.RandomGrayscale(p=0.05),
                transforms.RandomEqualize(p=0.05),
                transforms.RandomAutocontrast(p=0.05),
                transforms.RandomAdjustSharpness(
                    sharpness_factor=0.9, p=0.05
                ),  # < 1 is more blurry
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225]),
                transforms.RandomErasing(p=0.25, scale=(0.02, 0.33), ratio=(0.3, 3.3)),
            ]
        )
    # defaults simple transforms are specifically chosen for camera trap imagery
    # - random perspective shift
    # - random horizontal flip (no vertical flip; unlikely animals appear upside down cuz gravity)
    # - random rotation
    else:
        train_transforms = transforms.Compose(
            [
                transforms.Lambda(partial(resize_and_pad, desired_size=config.image_size)),
                transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
                transforms.RandomHorizontalFlip(p=0.3),
                transforms.RandomApply(ModuleList([transforms.RandomRotation((-22, 22))]), p=0.2),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225]),
            ]
        )
    validation_transforms = transforms.Compose(
        [
            transforms.Lambda(partial(resize_and_pad, desired_size=config.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.45, 0.45, 0.45], std=[0.225, 0.225, 0.225]),
        ]
    )
    os.makedirs(config.checkpoint_path, exist_ok=True)
    checkpoint_callback = ModelCheckpoint(
        monitor="val_loss",
        dirpath=config.checkpoint_path,
        filename=f"zamba-{config.name}-{config.model_name}" + "{epoch:02d}-{val_loss:.3f}",
        save_top_k=1,
        mode="min",
    )
    early_stopping = EarlyStopping(
        monitor="val_loss", patience=config.early_stopping_patience, mode="min"
    )
    swa = StochasticWeightAveraging(swa_lrs=1e-2)
    callbacks = [swa, early_stopping, checkpoint_callback]
    # Enable system metrics logging in MLflow
    mlflow.enable_system_metrics_logging()
    mlflow_logger = MLFlowLogger(
        run_name=f"zamba-{config.name}-{config.model_name}-{config.lr}-{random.randint(1000, 9999)}",
        experiment_name=config.name,
        tracking_uri=config.mlflow_tracking_uri,
    )
    data = ImageClassificationDataModule(
        data_dir=config.data_dir,
        cache_dir=config.cache_dir,
        annotations=config.labels,
        train_transforms=train_transforms,
        test_transforms=validation_transforms,
        batch_size=config.batch_size,
        num_workers=config.num_workers,
        detection_threshold=config.detections_threshold,
        crop_images=config.crop_images,
    )
    loss_fn = torch.nn.CrossEntropyLoss()
    if config.weighted_loss is True:
        # Balanced class weights counteract species imbalance in the labels.
        loss_fn = torch.nn.CrossEntropyLoss(weight=get_weights(data.annotations), reduction="mean")
    # Calculate number of training batches
    num_training_batches = len(data.train_dataloader())
    if config.from_scratch:
        initial_lr = config.lr if config.lr is not None else 1e-5  # reasonable empirical default
        classifier_module = ImageClassifierModule(
            model_name=config.model_name,
            species=config.species_in_label_order,
            lr=initial_lr,
            image_size=config.image_size,
            batch_size=config.batch_size if config.batch_size is not None else 16,
            num_training_batches=num_training_batches,
            loss=loss_fn,
            pin_memory=config.accelerator == "gpu",
            scheduler="CosineAnnealingLR",
            scheduler_params={"T_max": config.early_stopping_patience},
        )
    else:
        classifier_module = instantiate_model(
            checkpoint=config.checkpoint,
            labels=config.labels,
            scheduler_config=config.scheduler_config,
            from_scratch=config.from_scratch,
            model_name=None,
            use_default_model_labels=config.use_default_model_labels,
            species=config.species_in_label_order,
        )
    # compile for faster performance; disabled for MacOS which is not supported
    if sys.platform != "darwin":
        classifier = torch.compile(classifier_module)
    else:
        classifier = classifier_module
    # lower precision multiplication to speed up training
    try:
        torch.set_float32_matmul_precision("medium")
        torch._dynamo.config.cache_size_limit = (
            16  # cache more functions than default 8 to avoid recompiling
        )
    except Exception:
        logger.warning("Could not set float32 matmul precision to medium")
    # Set log_every_n_steps to a reasonable value (e.g., 1/10th of batches, minimum of 1)
    log_every_n_steps = max(1, num_training_batches // 10)
    # get the strategy based on devices
    if isinstance(config.devices, int) and config.devices > 1:
        strategy = "ddp"
    elif isinstance(config.devices, Iterable) and len(config.devices) > 1:
        strategy = "ddp"
    else:
        strategy = "auto"
    if config.accumulated_batch_size is not None:
        accumulate_n_batches = config.accumulated_batch_size // classifier_module.batch_size
    else:
        accumulate_n_batches = 1
    # Create trainers with different configurations
    train_trainer = pl.Trainer(
        max_epochs=config.max_epochs,
        logger=mlflow_logger,
        callbacks=callbacks,
        devices=config.devices,
        accelerator=config.accelerator,
        strategy=strategy,
        log_every_n_steps=log_every_n_steps,
        accumulate_grad_batches=accumulate_n_batches,
    )
    # Single device trainers for lr finding and testing
    single_device_trainer = pl.Trainer(
        max_epochs=config.max_epochs,
        logger=mlflow_logger,
        devices=1,
        accelerator=config.accelerator,
        log_every_n_steps=log_every_n_steps,
        accumulate_grad_batches=accumulate_n_batches,
    )
    tuner = Tuner(single_device_trainer)  # Use single device trainer for tuning
    # find largest feasible batch size if not set explicitly
    if config.batch_size is None:
        # Lightning asserts a batch size deep in its guts, so this makes sure that succeeds
        data.batch_size = 8
        logger.info("Finding maximum batch size")
        tuner.scale_batch_size(
            classifier, datamodule=data, mode="power", init_val=8, steps_per_trial=3
        )
        logger.info(f"Changing batch size to {data.batch_size}")
        # I think only the model gets saved, but the data loader holds the batch size,
        # so we need to make sure these stay in sync.
        classifier.hparams.batch_size = data.batch_size
    # find an optimal learning rate on single device
    if config.lr is None:
        logger.info("Finding a good learning rate")
        lr_finder = tuner.lr_find(classifier, data)
        new_lr = lr_finder.suggestion()
        logger.info(f"Changing learning rate to {new_lr}")
        # Make sure the new learning rate gets saved out as an hparam
        classifier.hparams.lr = new_lr
    _save_config(classifier, config)
    # Train with distributed training
    train_trainer.fit(
        classifier,
        train_dataloaders=data.train_dataloader(),
        val_dataloaders=data.val_dataloader(),
    )
    _save_metrics(data, single_device_trainer, classifier, config.save_dir)
    model_path = config.save_dir / (config.model_name + ".ckpt")
    Path(checkpoint_callback.best_model_path).rename(model_path)
    logger.info(f"Model checkpoint saved to {model_path}")
    # BUG FIX: the function is annotated `-> pl.Trainer` but previously returned
    # None; return the fitted trainer so the annotation holds (callers that
    # ignore the return value are unaffected).
    return train_trainer
class ZambaImagesManager:
    """Thin facade over the module-level ``predict``/``train`` entry points."""

    def predict(self, config: ImageClassificationPredictConfig):
        """Run image-level inference as described by ``config``."""
        predict(config)

    def train(self, config: ImageClassificationTrainingConfig):
        """Train an image classifier as described by ``config``."""
        train(config)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/classifier.py | zamba/images/classifier.py | import os
from typing import Any, List, Optional, Union
import numpy as np
import pytorch_lightning as pl
import timm
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import LRScheduler
import torch.nn as nn
import torch.utils
from zamba.images.evaluate import ClassificationEvaluator
from zamba.models.registry import register_model
from zamba.pytorch_lightning.base_module import ZambaClassificationLightningModule
@register_model
class ImageClassifierModule(ZambaClassificationLightningModule):
    """Lightning module wrapping a timm backbone for species image classification.

    Args:
        model_name: timm architecture name used to build the backbone.
        species: ordered list of class labels.
        image_size: square input resolution (stored as an hparam).
        batch_size: per-device batch size (stored as an hparam for tuners).
        num_training_batches: batches per epoch; used to derive weight decay.
        lr: initial learning rate.
        loss: loss callable; defaults to cross-entropy.
        finetune_from: checkpoint to warm-start from instead of timm pretrained weights.
        scheduler: optional LR scheduler class.
        scheduler_params: kwargs for the scheduler.
    """

    def __init__(
        self,
        model_name: str,
        species: List[str],
        image_size: int,
        batch_size: int,
        num_training_batches: Optional[int] = None,
        lr: float = 1e-5,
        loss: Any = None,
        finetune_from: Optional[Union[os.PathLike, str]] = None,
        scheduler: Optional[LRScheduler] = None,
        scheduler_params: Optional[dict] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            species=species,
            lr=lr,
            scheduler=scheduler,
            scheduler_params=scheduler_params,
            **kwargs,
        )
        self.image_size = image_size
        self.base_model_name = model_name
        self.num_training_batches = num_training_batches
        if finetune_from is None:
            # Fresh pretrained backbone with a head sized to our label set.
            self.model = timm.create_model(
                self.base_model_name, pretrained=True, num_classes=self.num_classes
            )
        else:
            # Warm start from a saved checkpoint, then swap in a new final layer.
            # NOTE(review): assumes the backbone exposes `head.fc` — true for the
            # architectures used here; confirm for new model_names.
            self.model = self.from_disk(finetune_from).model
            self.model.head.fc = nn.Linear(self.model.head.fc.in_features, self.num_classes)
        self.lr = lr
        if loss is None:
            loss = nn.CrossEntropyLoss()
        self.loss_fn = loss
        self.evaluator = ClassificationEvaluator(species)
        self.save_hyperparameters(
            "lr",
            "image_size",
            "batch_size",
            "model_name",
            "species",
            "scheduler",
            "scheduler_params",
        )

    def configure_optimizers(self):
        """Build the AdamW optimizer and, if configured, an LR scheduler."""
        # Use Adam optimizer
        # per https://arxiv.org/pdf/2405.13698, we set weight decay to
        # 1 / (lr * iter_per_epoch)
        optimizer = AdamW(
            self.parameters(),
            lr=self.lr,
            weight_decay=(
                1 / (self.lr * self.num_training_batches)
                if self.num_training_batches is not None
                else 0.01
            ),
        )
        # BUG FIX: scheduler_params defaults to None; membership tests on None
        # raised TypeError. Fall back to an empty dict (the stored dict, when
        # present, is still mutated in place as before).
        scheduler_params = self.hparams.scheduler_params or {}
        # Reset CyclicLR params assuming learning rate was found with lr_find or other empirical method
        if "base_lr" in scheduler_params:
            scheduler_params["base_lr"] = self.lr / 10
        if "max_lr" in scheduler_params:
            scheduler_params["max_lr"] = self.lr * 10
        if self.scheduler is not None:
            scheduler = self.scheduler(
                optimizer,
                **scheduler_params,
            )
            return [optimizer], [scheduler]
        else:
            return [optimizer]

    @staticmethod
    def aggregate_step_outputs(outputs):
        """Stack per-batch (y_true, y_pred) pairs into epoch-level arrays."""
        y_true = np.vstack([output[0] for output in outputs])
        y_pred = np.vstack([output[1] for output in outputs])
        return y_true, y_pred

    def _log_metrics(self, y_true, y_pred, subset) -> None:
        """Log evaluator metrics under ``<subset>_<metric>`` names."""
        metrics = self.evaluator.get_metrics(y_true, y_pred)
        for metric, value in metrics.items():
            self.log(f"{subset}_{metric}", value, logger=True, sync_dist=True, reduce_fx="mean")

    def training_step(self, batch, batch_idx):
        """One optimization step: cross-entropy against one-hot targets."""
        x, y = batch
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes).to(torch.float)
        logits = self(x)
        loss = self.loss_fn(logits, y)
        self.log("train_loss", loss, sync_dist=True, reduce_fx="mean")
        return loss

    def _val_step(self, batch, batch_idx, subset):
        """Shared val/test step: log loss and return (y_true, y_pred) numpy arrays."""
        x, y = batch
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes).to(torch.float)
        y_hat = self(x)
        loss = self.loss_fn(y_hat, y)
        self.log(f"{subset}_loss", loss.detach(), sync_dist=True, reduce_fx="mean")
        return (
            y.cpu().numpy().astype(int),
            y_hat.cpu().numpy(),
        )

    def validation_step(self, batch, batch_idx):
        output = self._val_step(batch, batch_idx, "val")
        self.validation_step_outputs.append(output)
        return output

    def test_step(self, batch, batch_idx):
        output = self._val_step(batch, batch_idx, "test")
        self.test_step_outputs.append(output)
        return output

    def on_validation_epoch_end(self):
        """Aggregate batch outputs, log epoch metrics, and reset the buffer."""
        y_true, y_pred = self.aggregate_step_outputs(self.validation_step_outputs)
        self._log_metrics(y_true, y_pred, "val")
        self.validation_step_outputs.clear()

    def on_test_epoch_end(self):
        """Aggregate test outputs, log metrics, and upload the confusion matrix."""
        y_true, y_pred = self.aggregate_step_outputs(self.test_step_outputs)
        self._log_metrics(y_true, y_pred, "test")
        confusion_matrix_image = self.evaluator.confusion_matrix_plot(y_true, y_pred)
        if confusion_matrix_image is not None:
            self.logger.experiment.log_image(
                self.logger.run_id, confusion_matrix_image, artifact_file="confusion_matrix.png"
            )
        self.test_step_outputs.clear()

    def on_train_end(self):
        """Log the best checkpoint to MLflow as a model artifact."""
        # no checkpoint callback when tuning parameters (e.g., finding learning rate), so skip save in that case.
        if (
            getattr(self.trainer, "checkpoint_callback", None) is not None
            and self.trainer.checkpoint_callback.best_model_path
        ):
            self.logger.experiment.log_artifact(
                self.logger.run_id,
                self.trainer.checkpoint_callback.best_model_path,
                artifact_path="model",
            )

    def to_disk(self, path: os.PathLike):
        """Save a Lightning-compatible checkpoint (without class-weight tensors)."""
        state_dict = self.state_dict()
        if "loss_fn.weight" in state_dict:
            # remove weights for loss if required
            del state_dict["loss_fn.weight"]
        checkpoint = {
            "state_dict": state_dict,
            "hyper_parameters": self.hparams,
            "global_step": self.global_step,
            "pytorch-lightning_version": pl.__version__,
        }
        torch.save(checkpoint, path)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/data.py | zamba/images/data.py | import copy
import os
from itertools import repeat
from pathlib import Path
from typing import Optional
import pandas as pd
import pytorch_lightning as pl
from loguru import logger
from megadetector.detection import run_detector
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
from zamba.images.bbox import (
absolute_bbox,
crop_to_bounding_box,
get_cache_filename,
load_image,
BboxLayout,
)
class ImageClassificationDataset(Dataset):
    """Dataset of (image, label) pairs backed by an annotations DataFrame.

    Rows with a ``cached_bbox`` entry are read from the cached crop instead of
    the original file under ``data_dir``.
    """

    def __init__(self, data_dir: Path, annotations: pd.DataFrame, transform) -> None:
        self.annotations = annotations
        self.data_dir = data_dir
        self.transform = transform

    def _get_image_path(self, item) -> Path:
        # Prefer the pre-cropped cached image when the row carries one.
        if "cached_bbox" in item:
            return item["cached_bbox"]
        return self.data_dir / item["filepath"]

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        row = self.annotations.iloc[index]
        image_path = self._get_image_path(row)
        with image_path.open("rb") as stream:
            loaded = Image.open(stream).convert("RGB")
        sample = self.transform(loaded) if self.transform else loaded
        return sample, int(row["label"])
class ImageClassificationDataModule(pl.LightningDataModule):
    """LightningDataModule that optionally crops images to animal bounding boxes.

    When ``crop_images`` is True, annotations are preprocessed up front: either
    cropped to bboxes already present in the annotations, or run through
    MegaDetector to generate bboxes. Crops are cached under ``cache_dir``.
    """

    def __init__(
        self,
        data_dir: Path,
        annotations: pd.DataFrame,
        cache_dir: Path,
        crop_images: bool,
        batch_size: int = 16,
        num_workers: Optional[int] = None,
        train_transforms=None,
        test_transforms=None,
        detection_threshold: float = 0.2,
    ) -> None:
        """Store configuration; optionally crop/caches images immediately.

        Args:
            data_dir: root directory for relative ``filepath`` entries.
            annotations: DataFrame with at least filepath/label/split columns.
            cache_dir: where cropped images are cached.
            crop_images: whether to crop each image to its bbox(es).
            batch_size: dataloader batch size.
            num_workers: dataloader workers; defaults to os.cpu_count().
            train_transforms / test_transforms: torchvision transforms
                (default: plain ToTensor).
            detection_threshold: MegaDetector confidence cutoff.
        """
        super().__init__()
        if train_transforms is None:
            train_transforms = transforms.Compose([transforms.ToTensor()])
        if test_transforms is None:
            test_transforms = transforms.Compose([transforms.ToTensor()])
        self.data_dir = data_dir
        self.cache_dir = cache_dir
        self.batch_size = batch_size
        self.train_transforms = train_transforms
        self.test_transforms = test_transforms
        self.detection_threshold = detection_threshold
        if num_workers is None:
            num_workers = os.cpu_count()
        self.num_workers = num_workers
        self.annotations = annotations
        if crop_images:
            self.annotations = self.preprocess_annotations(annotations)

    def preprocess_annotations(self, annotations: pd.DataFrame) -> pd.DataFrame:
        """Crop each annotation's image to its bbox, caching crops on disk.

        If the annotations already carry x1/x2/y1/y2 columns, crops them in
        parallel; otherwise runs MegaDetector to propose bboxes (one output row
        per detection, so the row count may change).
        """
        num_annotations = len(annotations)
        bbox_in_df = all(column in annotations.columns for column in ["x1", "x2", "y1", "y2"])
        if bbox_in_df:
            logger.info(
                f"Bboxes found in annotations. Cropping images and save to cache_dir: {self.cache_dir}"
            )
            # Parallel crop: each row is cropped to its bbox and cached.
            processed_annotations = process_map(
                crop_to_bounding_box,
                annotations.iterrows(),
                repeat(self.cache_dir),
                repeat(self.data_dir),
                total=len(annotations),
                desc="Cropping images",
            )
            annotations = pd.DataFrame(processed_annotations)
        else:
            processed_annotations = []
            detector = run_detector.load_detector(
                "MDV5A", force_cpu=(os.getenv("RUNNER_OS") == "macOS")
            )
            for _, row in tqdm(
                annotations.iterrows(),
                total=len(annotations),
                desc="Running megadetector to extract bboxes.",
            ):
                filepath = (
                    row["filepath"] if self.data_dir is None else self.data_dir / row["filepath"]
                )
                image = load_image(filepath)
                result = detector.generate_detections_one_image(
                    image, row["filepath"], detection_threshold=self.detection_threshold
                )
                # One annotation row per detection above the threshold.
                for detection in result["detections"]:
                    detection_row = copy.copy(row)
                    detection_row["detection_conf"] = detection["conf"]
                    detection_row["detection_category"] = detection["category"]
                    # MegaDetector returns relative [x, y, w, h]; convert to absolute x1y1x2y2.
                    bbox = absolute_bbox(image, detection["bbox"], bbox_layout=BboxLayout.XYWH)
                    cache_path = self.cache_dir / get_cache_filename(
                        detection_row["filepath"], bbox
                    )
                    if not cache_path.exists():
                        cache_path.parent.mkdir(parents=True, exist_ok=True)
                        cropped_image = image.crop(bbox)
                        with open(cache_path, "wb") as f:
                            cropped_image.save(f)
                    # bbox is (x1, y1, x2, y2); columns assigned accordingly.
                    (
                        detection_row["x1"],
                        detection_row["x2"],
                        detection_row["y1"],
                        detection_row["y2"],
                    ) = (bbox[0], bbox[2], bbox[1], bbox[3])
                    detection_row["cached_bbox"] = cache_path.resolve().absolute()
                    processed_annotations.append(detection_row)
            annotations = pd.DataFrame(processed_annotations)
        logger.info(
            f"Number of objects before preprocessing: {num_annotations}, "
            f"number of objects after preprocessing: {len(annotations)}"
        )
        return annotations

    def train_dataloader(self) -> DataLoader:
        """Shuffled loader over rows with split == 'train'."""
        return DataLoader(
            ImageClassificationDataset(
                self.data_dir,
                self.annotations[self.annotations["split"] == "train"],
                self.train_transforms,
            ),
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )

    def val_dataloader(self) -> DataLoader:
        """Deterministic loader over rows with split == 'val'."""
        return DataLoader(
            ImageClassificationDataset(
                self.data_dir,
                self.annotations[self.annotations["split"] == "val"],
                self.test_transforms,
            ),
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

    def test_dataloader(self) -> DataLoader:
        """Deterministic loader over rows with split == 'test'."""
        return DataLoader(
            ImageClassificationDataset(
                self.data_dir,
                self.annotations[self.annotations["split"] == "test"],
                self.test_transforms,
            ),
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/bbox.py | zamba/images/bbox.py | from enum import StrEnum
from pathlib import Path
from typing import Iterable, Tuple, Union
from loguru import logger
import pandas as pd
from PIL import Image
class BboxInputFormat(StrEnum):
    """Supported on-disk bounding-box annotation schemas."""

    COCO = "coco"
    MEGADETECTOR = "megadetector"
class BboxLayout(StrEnum):
    """Bbox coordinate layouts: corner pair (x1,y1,x2,y2) or corner+size (x,y,w,h)."""

    XYXY = "xyxy"
    XYWH = "xywh"
def bbox_json_to_df(
    bbox_json: dict, bbox_format: BboxInputFormat = BboxInputFormat.COCO
) -> pd.DataFrame:
    """Flatten a COCO or MegaDetector annotation JSON into a DataFrame.

    Args:
        bbox_json: parsed annotation JSON.
        bbox_format: which schema ``bbox_json`` follows.

    Returns:
        One row per annotation/detection with filepath, label, and x1/y1/x2/y2
        (absolute pixels for COCO input; relative fractions for MegaDetector,
        passed through as-is).

    Raises:
        ValueError: for an unrecognized ``bbox_format``.
    """
    if bbox_format == BboxInputFormat.COCO:
        logger.info("Processing bounding box labels from coco format")
        images = pd.DataFrame(bbox_json["images"])[["id", "file_name"]]
        images["filepath"] = images["file_name"]
        images = images.drop("file_name", axis=1)
        annotations = pd.DataFrame(bbox_json["annotations"])[
            ["id", "image_id", "category_id", "bbox"]
        ]
        annotations = annotations[~annotations["bbox"].isna()]
        # BUG FIX: align the exploded bbox columns on annotations' index, which
        # may have gaps after the NaN filter above. Without index=, pandas aligns
        # the new frame's fresh RangeIndex instead, leaving NaNs in the
        # misaligned rows.
        annotations[["x", "y", "width", "height"]] = pd.DataFrame(
            annotations["bbox"].tolist(), index=annotations.index
        )
        categories = pd.DataFrame(bbox_json["categories"])[["id", "name"]]
        categories.rename(columns={"name": "label"}, inplace=True)
        # right-merge keeps every annotation, attaching its image's filepath
        result = pd.merge(
            images,
            annotations,
            left_on="id",
            right_on="image_id",
            how="right",
            suffixes=("_image", "_annotation"),
        )
        result = pd.merge(
            result,
            categories,
            left_on="category_id",
            right_on="id",
            how="left",
            suffixes=("_image", "_category"),
        )
        # Convert COCO's corner+size layout to corner-pair x1y1x2y2.
        if all(column in result.columns for column in ["x", "y", "width", "height"]):
            result.rename({"x": "x1", "y": "y1"}, axis=1, inplace=True)
            result["x2"] = result["x1"] + result["width"]
            result["y2"] = result["y1"] + result["height"]
        return result
    elif bbox_format == BboxInputFormat.MEGADETECTOR:
        logger.info("Processing bounding box labels from megadetector format")
        detection_categories = bbox_json["detection_categories"]
        classification_categories = bbox_json["classification_categories"]
        out_rows = []
        for img_ix, image in enumerate(bbox_json["images"]):
            if image.get("detections") is None:
                continue
            for d_ix, detection in enumerate(image.get("detections", [])):
                # skip if bbox is 0 height or width
                if detection["bbox"][2] <= 0 or detection["bbox"][3] <= 0:
                    continue
                detection_classification = detection.get("classifications")
                if detection_classification is None:
                    continue
                # Top-ranked classification only (index 0).
                out_rows.append(
                    {
                        "image_id": img_ix,
                        "detection_id": d_ix,
                        "id": f"{img_ix}_{d_ix}",
                        "filepath": image["file"],
                        "label": classification_categories[detection_classification[0][0]],
                        "label_id": detection_classification[0][0],
                        "label_confidence": detection_classification[0][1],
                        "detection_label": detection_categories[detection["category"]],
                        "detection_label_id": detection["category"],
                        "detection_confidence": detection["conf"],
                        "x1": detection["bbox"][0],
                        "y1": detection["bbox"][1],
                        "x2": detection["bbox"][0] + detection["bbox"][2],
                        "y2": detection["bbox"][1] + detection["bbox"][3],
                    }
                )
        return pd.DataFrame(out_rows).set_index("id")
    else:
        raise ValueError(
            f"Invalid bbox_format: {bbox_format}; expected one of {BboxInputFormat.__members__.keys()}"
        )
def absolute_bbox(
    img: Image,
    bbox: Union[Iterable[float], Iterable[int]],
    bbox_layout: BboxLayout = BboxLayout.XYXY,
) -> Tuple[int, int, int, int]:
    """Ensures a bbox is in absolute pixel units. Turns relative to absolute.
    This assumes the origin is top-left.

    Args:
        img: image whose ``size`` (width, height) scales relative coordinates.
        bbox: four coordinates; treated as relative fractions when all <= 1.
        bbox_layout: layout of ``bbox`` (corner-pair XYXY or corner+size XYWH).

    Returns:
        tuple[int, int, int, int]: x1 (left), y1 (top), x2 (right), y2 (bottom)

    Raises:
        ValueError: if ``bbox_layout`` is not a recognized layout.
    """
    # Heuristic: all coords <= 1 means relative. NOTE(review): a genuine 1x1-px
    # absolute box at the origin would be misread as relative — confirm callers
    # never pass one.
    if all(n <= 1 for n in bbox):
        shape = img.size
        if bbox_layout == BboxLayout.XYWH:
            x1, y1, width, height = [
                int(bbox[0] * shape[0]),
                int(bbox[1] * shape[1]),
                int(bbox[2] * shape[0]),
                int(bbox[3] * shape[1]),
            ]
            x2, y2 = x1 + width, y1 + height
        elif bbox_layout == BboxLayout.XYXY:
            x1, y1, x2, y2 = [
                int(bbox[0] * shape[0]),
                int(bbox[1] * shape[1]),
                int(bbox[2] * shape[0]),
                int(bbox[3] * shape[1]),
            ]
        else:
            # BUG FIX: the error previously listed BboxInputFormat members, but
            # the parameter being validated is a BboxLayout.
            raise ValueError(
                f"Invalid bbox_layout: {bbox_layout}; expected one of {BboxLayout.__members__.keys()}"
            )
        return x1, y1, x2, y2
    else:
        # Already absolute; returned unchanged (may not be an int tuple).
        return bbox
def crop_to_bounding_box(row, cache_dir, image_dir: Path | None = None) -> "pd.Series":
    """Crop one annotation row's image to its bbox, caching the crop on disk.

    Args:
        row: (index, Series) pair as produced by ``DataFrame.iterrows``.
        cache_dir: directory under which cropped images are written.
        image_dir: optional root for resolving relative filepaths.

    Returns:
        The row (Series) updated with absolute bbox coordinates and a
        ``cached_bbox`` path. (Return annotation corrected: this returns the
        row, not an Image.)
    """
    _, row = row  # Unpack the index and the row
    filepath = row["filepath"] if image_dir is None else image_dir / row["filepath"]
    image = load_image(filepath)
    # Normalize any relative coordinates to absolute pixels.
    bbox = absolute_bbox(
        image, [row["x1"], row["y1"], row["x2"], row["y2"]], bbox_layout=BboxLayout.XYXY
    )
    row["x1"], row["y1"], row["x2"], row["y2"] = bbox
    cache_path = cache_dir / get_cache_filename(row["filepath"], bbox)
    # Only crop and write when the cache file doesn't already exist.
    if not cache_path.exists():
        cache_path.parent.mkdir(parents=True, exist_ok=True)
        cropped_image = image.crop(bbox)
        with open(cache_path, "wb") as f:
            cropped_image.save(f)
    row["cached_bbox"] = cache_path.resolve().absolute()
    return row
def load_image(img_path) -> Image:
    """Open the image at ``img_path`` and return it converted to RGB."""
    with open(img_path, "rb") as stream:
        return Image.open(stream).convert("RGB")
def get_cache_filename(filepath: str, bbox) -> Path:
    """Build a cache-relative filename that embeds the bbox coordinates.

    Absolute input paths are made relative to the filesystem root so the result
    can live under a cache directory.
    """
    path = Path(filepath)
    if path.is_absolute():
        path = path.relative_to("/")
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    stem_with_bbox = f"{path.stem}_{x1}_{y1}_{x2}_{y2}"
    return path.parent / f"{stem_with_bbox}{path.suffix}"
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/dataset/dataset.py | zamba/images/dataset/dataset.py | import json
import math
import os
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional
import boto3
import pandas as pd
import typer
import yaml
from botocore import UNSIGNED
from botocore.config import Config
from botocore.exceptions import ClientError
from loguru import logger
from PIL import Image
from tqdm.contrib.concurrent import process_map
app = typer.Typer()
def _remove_prefix(text: str, prefix: str) -> str:
if text.startswith(prefix):
return text[len(prefix) :]
return text
def load_image_from_bucket(source: str) -> Image.Image:
    """Fetch an image from a public S3 URI of the form ``s3://bucket/key``."""
    # UNSIGNED config = anonymous access; works only on public buckets.
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    source = _remove_prefix(source, "s3://")
    # First path segment is the bucket; the rest is the object key.
    bucket_name, object_key = source.split("/", 1)
    bucket = s3.Bucket(bucket_name)
    obj = bucket.Object(object_key)
    response = obj.get()
    file_stream = response["Body"]
    # PIL reads lazily from the streaming body.
    img = Image.open(file_stream)
    return img
def split(value, splits) -> str:
    """Map a location value to its split name ('train'/'test'/'val'/'other')."""
    key = str(value)
    for split_name in ("train", "test", "val"):
        if key in splits.get(split_name, []):
            return split_name
    return "other"
def prepare_dataset(
    annotations: Dict[str, Any],
    splits: Dict[str, Any],
    storage_path: str,
    categories_name_mapper: Dict[str, str],
    name: str,
):
    """Normalize one COCO-style dataset into a flat annotation DataFrame.

    Joins annotations to images, assigns train/test/val splits by location,
    builds storage URLs and globally-unique ids, and maps category names
    through ``categories_name_mapper`` (rows with unmapped categories are
    dropped).

    Args:
        annotations: parsed COCO-style JSON with images/annotations/categories.
        splits: mapping (optionally nested under a "splits" key) of split name
            to location ids.
        storage_path: URL/path prefix where the dataset's images live.
        categories_name_mapper: source category name -> unified category name.
        name: dataset name; prefixed onto annotation ids to keep them unique
            when multiple datasets are merged.
    """
    if not storage_path.endswith("/"):
        storage_path += "/"
    if "splits" in splits:
        splits = splits["splits"]
    df_annotations = pd.DataFrame(annotations["annotations"])
    df_images = pd.DataFrame(annotations["images"])
    df = pd.merge(
        df_annotations,
        df_images,
        left_on="image_id",
        right_on="id",
        how="inner",
        suffixes=("_annotations", "_images"),
    )
    # remove rows with non-existing files
    # (WCS-specific: its "humans/" subtree is not available)
    if annotations.get("info", {}).get("contributor", "").lower() == "wcs":
        df = df[df["file_name"].apply(lambda x: x[:7] != "humans/")]
    df = df[df["bbox"].notnull()]
    # Splits are defined per capture location, not per image.
    df["split"] = df["location"].apply(lambda x: split(x, splits))
    df["source"] = storage_path + df["file_name"]  # storage path to file
    # NOTE(review): assumes annotation ids are strings — string concatenation
    # would raise on integer ids; confirm for new datasets.
    df["id"] = name + df["id_annotations"]  # unique ID in merged dataset
    # map categories and drop categories non-included in config
    categories_map = {x["id"]: x["name"] for x in annotations["categories"]}
    df["category_name"] = df["category_id"].apply(lambda x: categories_map[x])
    df["category"] = df["category_name"].apply(lambda x: categories_name_mapper.get(x, None))
    df = df[df["category"].notnull()]
    return df
def merge_datasets(
    dataset_config: Path,
) -> pd.DataFrame:
    """Load every dataset listed in a YAML config and merge into one DataFrame.

    The config provides, per dataset: annotation JSON path, splits JSON path,
    storage prefix, and a name, plus a shared category-name mapping. Returns a
    frame with (among others) ``filepath``, ``label``, and ``split`` columns.
    """
    with open(dataset_config, "r") as f:
        config = yaml.safe_load(f)
    categories_name_mapper = config["categories"]["map"]
    annotations_list = []
    splits_list = []
    storages = []
    names = []
    # Load each dataset's annotation and split files from disk.
    for dataset in config["datasets"]:
        with open(dataset["annotations"], "r") as f:
            annotations_list.append(json.load(f))
        with open(dataset["splits"], "r") as f:
            splits_list.append(json.load(f))
        storages.append(dataset["storage"])
        names.append(dataset["name"])
    data_frames = []
    for annotations, splits, storage_path, name in zip(
        annotations_list, splits_list, storages, names
    ):
        data_frames.append(
            prepare_dataset(annotations, splits, storage_path, categories_name_mapper, name)
        )
    df = pd.concat(data_frames, ignore_index=True)
    # Local filename: unique id + original file extension.
    df["filepath"] = df["id"] + "." + df["source"].str.split(".").str[-1]
    df["label"] = df["category_name"]
    return df
def crop_image(img: Image, bbox: List) -> Image.Image:
    """
    Crop image using annotation bbox.

    Args:
        img (Image): Original image
        bbox (list): A list containing four elements representing the bounding box: [x_min, y_min, width, height].

    Returns:
        Cropped Image.

    Notes:
        x_min (float): The x-coordinate of the top-left corner of the bounding box.
        y_min (float): The y-coordinate of the top-left corner of the bounding box.
        width (float): The width of the bounding box.
        height (float): The height of the bounding box.
    """
    x_min, y_min, width, height = (int(coord) for coord in bbox[:4])
    return img.crop((x_min, y_min, x_min + width, y_min + height))
def process_files(data_dir: Path, annot: Dict, download: bool = True) -> Optional[Dict]:
    """Ensure the cropped image for one annotation exists locally.

    Args:
        data_dir: local root under which ``annot["filepath"]`` should exist.
        annot: annotation record with "filepath", "source" (S3 URI), and "bbox".
        download: when True, fetch and crop missing images from S3.

    Returns:
        The annotation when its image is available locally, else None.
    """
    image_path = data_dir / annot["filepath"]
    if os.path.exists(image_path):
        return annot
    if not download:
        return None
    try:
        img = load_image_from_bucket(annot["source"])
        img = crop_image(img, annot["bbox"])
        # BUG FIX: image bytes must be written in binary mode; text mode ("w")
        # breaks PIL's save of binary data.
        with image_path.open("wb") as output_file:
            img.save(output_file)
    except ClientError:
        logger.warning(f"No such key {annot['source']}!")
        return None
    except (Exception,):
        # Deliberate best-effort: any other failure drops this annotation
        # rather than aborting the whole batch.
        logger.warning(f"Exception for {annot['source']}!")
        return None
    return annot
@app.command(help="Load and preprocess datasets from config file")
def load_data(
    data_dir: Path = typer.Option(
        ..., exists=True, file_okay=False, dir_okay=True, help="Path to the data directory."
    ),
    dataset_config: Path = typer.Option(
        ..., exists=True, file_okay=True, dir_okay=False, help="Path to the config file."
    ),
    result_path: Path = typer.Option(
        ..., exists=False, file_okay=True, dir_okay=False, help="Path to the result file."
    ),
    download_data: bool = typer.Option(
        False, "--download", help="Download and process dataset images."
    ),
) -> None:
    """
    Preprocesses datasets from the specified images directory.
    Args:
        data_dir (Path): Path to the data directory.
        dataset_config (Path): Path to the dataset config .yaml file.
        result_path (Path): Path to the result file (.csv) with 'filepath', 'label' and 'split' columns.
        download_data (bool): Download and process dataset images. Boolean flag, default is False
    """
    annotations = merge_datasets(dataset_config)
    if download_data:
        data = process_map(
            # BUG FIX: previously passed download=False here, so the --download flag
            # never downloaded anything and every missing image was silently dropped.
            partial(process_files, data_dir, download=True),
            annotations.to_dict(orient="records"),
            # BUG FIX: tqdm's process_map takes `max_workers`, not `num_workers`
            # (unknown kwargs are rejected by tqdm).
            max_workers=os.cpu_count(),
            # Guard against chunksize=0 when the annotation table is tiny/empty.
            chunksize=max(1, int(math.sqrt(len(annotations)))),
        )
        # Drop records whose image could not be fetched or processed.
        data = [x for x in data if x is not None]
        annotations = pd.DataFrame(data)
    annotations[["filepath", "label", "split"]].to_csv(result_path)
# Entry point: dispatch to the Typer CLI app when run as a script.
if __name__ == "__main__":
    app()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/images/dataset/__init__.py | zamba/images/dataset/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/__init__.py | zamba/object_detection/__init__.py | from zamba.object_detection.yolox.yolox_model import YoloXArgs, YoloXExp, YoloXModel
__all__ = ["YoloXArgs", "YoloXExp", "YoloXModel"]
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/yolox/yolox_model.py | zamba/object_detection/yolox/yolox_model.py | from pathlib import Path
import os
import yaml
from pydantic import BaseModel
from yolox.exp import Exp
import yolox.utils as utils
class YoloXArgs(BaseModel):
    """Args for commandline training of yolox from:
    train: https://github.com/Megvii-BaseDetection/YOLOX/blob/68408b4083f818f50aacc29881e6f97cd19fcef2/tools/train.py#L18-L96
    eval: https://github.com/Megvii-BaseDetection/YOLOX/blob/68408b4083f818f50aacc29881e6f97cd19fcef2/tools/eval.py#L29-L111
    """

    # training / distributed-launch options (mirrors tools/train.py flags)
    experiment_name: str = None
    dist_backend: str = "nccl"
    dist_url: str = None
    batch_size: int = 64
    devices: int = None
    resume: bool = False
    ckpt: Path = None
    start_epoch: int = None
    num_machines: int = 1
    machine_rank: int = 0
    fp16: bool = False
    cache: bool = False
    occupy: bool = False
    logger: str = "tensorboard"
    # evaluation options (mirrors tools/eval.py flags -- see link above)
    conf: float = None
    nms: float = None
    tsize: int = None
    seed: int = None
    fuse: bool = False
    trt: bool = False
    legacy: bool = False
    test: bool = False
    speed: bool = False
class YoloXExp(BaseModel):
    """Overridable subset of the YOLOX base experiment settings."""

    # just the pieces that we want to be able to override from
    # https://github.com/Megvii-BaseDetection/YOLOX/blob/68408b4083f818f50aacc29881e6f97cd19fcef2/yolox/exp/yolox_base.py
    #
    # See the above link for more detail on these options
    # Missing options were intentionally omitted because they are overridden when loading a yolo-* experiment from:
    # https://github.com/Megvii-BaseDetection/YOLOX/tree/68408b4083f818f50aacc29881e6f97cd19fcef2/exps/default
    # model config
    num_classes: int
    # dataloader config
    data_num_workers: int = 4
    data_dir: str = None
    train_ann: str = None
    val_ann: str = None
    test_ann: str = None
    # training config
    warmup_epochs: int = 5
    max_epoch: int = 300
    warmup_lr: float = 0.0
    min_lr_ratio: float = 0.05
    basic_lr_per_img: float = 0.01 / 64.0
    scheduler: str = "yoloxwarmcos"
    no_aug_epochs: int = 15
    ema: bool = True
    weight_decay: float = 5e-4
    momentum: float = 0.9
    print_interval: int = 10
    eval_interval: int = 10
    save_history_ckpt: bool = True
    # test config
    test_conf: float = 0.01
    nmsthre: float = 0.65
class TinyExp(Exp):
    # default tiny exp copied from:
    # https://github.com/Megvii-BaseDetection/YOLOX/blob/main/exps/default/yolox_tiny.py
    def __init__(self):
        """Configure the yolox-tiny depth/width scaling and augmentation defaults."""
        super(TinyExp, self).__init__()
        self.depth = 0.33
        self.width = 0.375
        self.scale = (0.5, 1.5)
        self.random_size = (10, 20)
        self.test_size = (416, 416)
        # experiment name derived from this file's basename (without extension)
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.enable_mixup = False
class YoloXModel:
    def __init__(self, exp: YoloXExp, args: YoloXArgs, gpus: int = None, image_size: int = None):
        """Combine a YOLOX experiment and command-line-style args into one object.

        Args:
            exp (YoloXExp): Experiment overrides applied on top of the default TinyExp.
            args (YoloXArgs): Training/eval arguments.
            gpus (int, optional): Number of devices to use; falls back to args.devices,
                then to all detected devices.
            image_size (int, optional): If given, use a square (image_size, image_size)
                input size.
        """
        utils.configure_module()
        # load base tiny exp
        base_exp = TinyExp()
        # override the yolo experiment default settings with any
        # ones that we pass in
        for k, v in exp.dict().items():
            setattr(base_exp, k, v)
        if not args.experiment_name:
            args.experiment_name = base_exp.exp_name
        self.exp = base_exp
        self.args = args
        gpus = gpus or args.devices
        self.num_gpu = utils.get_num_devices() if gpus is None else gpus
        assert self.num_gpu <= utils.get_num_devices()
        if image_size is not None:
            self.exp.input_size = (image_size, image_size)
        if self.args.tsize is not None:
            self.exp.test_size = (self.args.tsize, self.args.tsize)

    @classmethod
    def load(
        cls,
        checkpoint: os.PathLike,
        model_kwargs_path: os.PathLike,
        *args,
        **kwargs,
    ):
        """Instantiate a YoloXModel from a checkpoint and a saved model-kwargs file.

        Args:
            checkpoint (pathlike): Path to the trained checkpoint (.pth).
            model_kwargs_path (pathlike): YAML/JSON file of saved model kwargs; must
                contain an "image_size" entry (KeyError otherwise).
        """
        model_kwargs = yaml.safe_load(Path(model_kwargs_path).read_text())
        model_kwargs["ckpt"] = checkpoint
        exp_dict = dict()
        args_dict = dict()
        # parse out which fields go to YoloXExp and which go to YoloXArgs
        if model_kwargs is not None:
            for k in model_kwargs.keys():
                if k in YoloXArgs.__fields__.keys():
                    args_dict[k] = model_kwargs[k]
                else:
                    exp_dict[k] = model_kwargs[k]
        return cls(
            YoloXExp(**exp_dict),
            YoloXArgs(**args_dict),
            image_size=model_kwargs["image_size"],
            *args,
            **kwargs,
        )
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/yolox/__init__.py | zamba/object_detection/yolox/__init__.py | python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false | |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/zamba/object_detection/yolox/megadetector_lite_yolox.py | zamba/object_detection/yolox/megadetector_lite_yolox.py | from enum import Enum
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
from pydantic import BaseModel
import torch
from tqdm import tqdm
from yolox.utils.boxes import postprocess
from zamba.object_detection import YoloXModel
# Default MegadetectorLite checkpoint and its saved model kwargs, bundled as package assets.
LOCAL_MD_LITE_MODEL = Path(__file__).parent / "assets" / "yolox_tiny_640_20220528.pth"
LOCAL_MD_LITE_MODEL_KWARGS = (
    Path(__file__).parent / "assets" / "yolox_tiny_640_20220528_model_kwargs.json"
)
class FillModeEnum(str, Enum):
    """Enum for frame filtering fill modes

    Attributes:
        repeat: Randomly resample qualifying frames to get to n_frames
        score_sorted: Take up to n_frames in sort order (even if some have zero probability)
        weighted_euclidean: Sample the remaining frames weighted by their euclidean distance in
            time to the frames over the threshold
        weighted_prob: Sample the remaining frames weighted by their predicted probability
    """

    # str mixin lets members compare equal to their string values (e.g. == "repeat")
    repeat = "repeat"
    score_sorted = "score_sorted"
    weighted_euclidean = "weighted_euclidean"
    weighted_prob = "weighted_prob"
class MegadetectorLiteYoloXConfig(BaseModel):
    """Configuration for a MegadetectorLiteYoloX frame selection model

    Attributes:
        confidence (float): Only consider object detections with this confidence or greater
        nms_threshold (float): Non-maximum suppression is a method for filtering many bounding
            boxes around the same object to a single bounding box. This is a constant that
            determines how much to suppress similar bounding boxes.
        image_width (int): Scale image to this width before sending to object detection model.
        image_height (int): Scale image to this height before sending to object detection model.
        device (str): Where to run the object detection model, "cpu" or "cuda".
        frame_batch_size (int): Number of frames to predict on at once.
        n_frames (int, optional): Max number of frames to return. If None returns all frames above
            the threshold. Defaults to None.
        fill_mode (str, optional): Mode for upsampling if the number of frames above the threshold
            is less than n_frames. Defaults to "repeat".
        sort_by_time (bool, optional): Whether to sort the selected frames by time (original order)
            before returning. If False, returns frames sorted by score (descending). Defaults to
            True.
        seed (int, optional): Random state for random number generator. Defaults to 55.
    """

    confidence: float = 0.25
    nms_threshold: float = 0.45
    image_width: int = 640
    image_height: int = 640
    # device is chosen at import time based on CUDA availability
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    frame_batch_size: int = 24
    n_frames: Optional[int] = None
    fill_mode: Optional[FillModeEnum] = FillModeEnum.score_sorted
    sort_by_time: bool = True
    seed: Optional[int] = 55

    class Config:
        # reject unknown config keys instead of silently ignoring them
        extra = "forbid"
class MegadetectorLiteYoloX:
    def __init__(
        self,
        path: os.PathLike = LOCAL_MD_LITE_MODEL,
        kwargs: os.PathLike = LOCAL_MD_LITE_MODEL_KWARGS,
        config: Optional[Union[MegadetectorLiteYoloXConfig, dict]] = None,
    ):
        """MegadetectorLite based on YOLOX.

        Args:
            path (pathlike): Path to trained YoloX model checkpoint (.pth extension)
            kwargs (pathlike): Path to the saved model keyword arguments used to rebuild
                the YoloX experiment for this checkpoint.
            config (MegadetectorLiteYoloXConfig): YoloX configuration
        """
        # Accept a config object, a plain dict, or nothing (use defaults).
        if config is None:
            config = MegadetectorLiteYoloXConfig()
        elif isinstance(config, dict):
            config = MegadetectorLiteYoloXConfig.parse_obj(config)
        yolox = YoloXModel.load(
            checkpoint=path,
            model_kwargs_path=kwargs,
        )
        # Restore trained weights onto the experiment's model and freeze in eval mode.
        ckpt = torch.load(yolox.args.ckpt, weights_only=False, map_location=config.device)
        model = yolox.exp.get_model()
        model.load_state_dict(ckpt["model"])
        model = model.eval().to(config.device)
        self.model = model
        self.yolox = yolox
        self.config = config
        self.num_classes = yolox.exp.num_classes

    @staticmethod
    def scale_and_pad_array(
        image_array: np.ndarray, output_width: int, output_height: int
    ) -> np.ndarray:
        """Resize to fit inside (output_width, output_height), padding toward the
        bottom/right (centering=(0, 0)) so the aspect ratio is preserved."""
        return np.array(
            ImageOps.pad(
                Image.fromarray(image_array),
                (output_width, output_height),
                method=Image.BICUBIC,
                color=None,
                centering=(0, 0),
            )
        )

    def _preprocess(self, frame: np.ndarray) -> np.ndarray:
        """Process an image for the model, including scaling/padding the image, transposing from
        (height, width, channel) to (channel, height, width) and casting to float.
        """
        arr = np.ascontiguousarray(
            self.scale_and_pad_array(frame, self.config.image_width, self.config.image_height),
            dtype=np.float32,
        )
        return np.moveaxis(arr, 2, 0)

    def _preprocess_video(self, video: np.ndarray) -> np.ndarray:
        """Process a video for the model, including resizing the frames in the video, transposing
        from (batch, height, width, channel) to (batch, channel, height, width) and casting to float.
        """
        resized_video = np.zeros(
            (video.shape[0], video.shape[3], self.config.image_height, self.config.image_width),
            dtype=np.float32,
        )
        for frame_idx in range(video.shape[0]):
            resized_video[frame_idx] = self._preprocess(video[frame_idx])
        return resized_video

    def detect_video(self, video_arr: np.ndarray, pbar: bool = False):
        """Runs object detection on a video.

        Args:
            video_arr (np.ndarray): A video array with dimensions (frames, height, width, channels).
            pbar (bool): Whether to show progress bar. Defaults to False.

        Returns:
            list: A list containing detections and score for each frame. Each tuple contains two arrays:
                the first is an array of bounding box detections with dimensions (object, 4) where
                object is the number of objects detected and the other 4 dimension are
                (x1, y1, x2, y2). The second is an array of object detection confidence scores of
                length (object) where object is the number of objects detected.
        """
        # When disabled, pbar becomes an identity function so the loop below is unchanged.
        pbar = tqdm if pbar else lambda x: x
        # batch of frames
        batch_size = self.config.frame_batch_size
        video_outputs = []
        with torch.no_grad():
            for i in range(0, len(video_arr), batch_size):
                a = video_arr[i : i + batch_size]
                outputs = self.model(
                    torch.from_numpy(self._preprocess_video(a)).to(self.config.device)
                )
                # Filter raw model outputs by confidence and run NMS.
                outputs = postprocess(
                    outputs, self.num_classes, self.config.confidence, self.config.nms_threshold
                )
                video_outputs.extend(outputs)
        detections = []
        for o in pbar(video_outputs):
            detections.append(
                self._process_frame_output(
                    o, original_height=video_arr.shape[1], original_width=video_arr.shape[2]
                )
            )
        return detections

    def detect_image(self, img_arr: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Runs object detection on an image.

        Args:
            img_arr (np.ndarray): An image array with dimensions (height, width, channels).

        Returns:
            np.ndarray: An array of bounding box detections with dimensions (object, 4) where
                object is the number of objects detected and the other 4 dimension are
                (x1, y1, x2, y2).
            np.ndarray: An array of object detection confidence scores of length (object) where
                object is the number of objects detected.
        """
        with torch.no_grad():
            outputs = self.model(
                torch.from_numpy(self._preprocess(img_arr)).unsqueeze(0).to(self.config.device)
            )
            output = postprocess(
                outputs, self.num_classes, self.config.confidence, self.config.nms_threshold
            )
        return self._process_frame_output(output[0], img_arr.shape[0], img_arr.shape[1])

    def _process_frame_output(self, output, original_height, original_width):
        """Convert one frame's postprocessed detections to (boxes, scores) arrays with
        boxes expressed as proportions of the original image. Returns empty arrays when
        the frame had no detections (output is None)."""
        if output is None:
            return np.array([]), np.array([])
        else:
            # Combined score is the product of the two confidence outputs.
            detections = pd.DataFrame(
                output.cpu().numpy(),
                columns=["x1", "y1", "x2", "y2", "score1", "score2", "class_num"],
            ).assign(score=lambda row: row.score1 * row.score2)
            # Transform bounding box to be in terms of the original image dimensions
            ratio = min(
                self.config.image_width / original_width,
                self.config.image_height / original_height,
            )
            detections[["x1", "y1", "x2", "y2"]] /= ratio
            # Express bounding boxes in terms of proportions of original image dimensions
            detections[["x1", "x2"]] /= original_width
            detections[["y1", "y2"]] /= original_height
            return detections[["x1", "y1", "x2", "y2"]].values, detections.score.values

    def filter_frames(
        self, frames: np.ndarray, detections: List[Tuple[float, float, float, float]]
    ) -> np.ndarray:
        """Filter video frames using megadetector lite.

        Which frames are returned depends on the fill_mode and how many frames are above the
        confidence threshold. If more than n_frames are above the threshold, the top n_frames are
        returned. Otherwise add to those over threshold based on fill_mode. If none of these
        conditions are met, returns all frames above the threshold.

        Args:
            frames (np.ndarray): Array of video frames to filter with dimensions (frames, height,
                width, channels)
            detections (list of tuples): List of detection results for each frame. Each element is
                a tuple of the list of bounding boxes [array(x1, y1, x2, y2)] and the detection
                probabilities, both as float

        Returns:
            np.ndarray: An array of video frames of length n_frames or shorter
        """
        frame_scores = pd.Series(
            [(np.max(score) if (len(score) > 0) else 0) for _, score in detections]
        ).sort_values(
            ascending=False
        )  # reduce to one score per frame
        selected_indices = frame_scores.loc[frame_scores > self.config.confidence].index
        if self.config.n_frames is None:
            # no minimum n_frames provided, just select all the frames with scores > threshold
            pass
        elif len(selected_indices) >= self.config.n_frames:
            # num. frames with scores > threshold is greater than the requested number of frames
            selected_indices = (
                frame_scores[selected_indices]
                .sort_values(ascending=False)
                .iloc[: self.config.n_frames]
                .index
            )
        elif len(selected_indices) < self.config.n_frames:
            # num. frames with scores > threshold is less than the requested number of frames
            # repeat frames that are above threshold to get to n_frames
            rng = np.random.RandomState(self.config.seed)
            if self.config.fill_mode == "repeat":
                repeated_indices = rng.choice(
                    selected_indices,
                    self.config.n_frames - len(selected_indices),
                    replace=True,
                )
                selected_indices = np.concatenate((selected_indices, repeated_indices))
            # take frames in sorted order up to n_frames, even if score is zero
            elif self.config.fill_mode == "score_sorted":
                selected_indices = (
                    frame_scores.sort_values(ascending=False).iloc[: self.config.n_frames].index
                )
            # sample up to n_frames, prefer points closer to frames with detection
            elif self.config.fill_mode == "weighted_euclidean":
                sample_from = frame_scores.loc[~frame_scores.index.isin(selected_indices)].index
                # take one over euclidean distance to all points with detection
                weights = [1 / np.linalg.norm(selected_indices - sample) for sample in sample_from]
                # normalize weights
                weights /= np.sum(weights)
                sampled = rng.choice(
                    sample_from,
                    self.config.n_frames - len(selected_indices),
                    replace=False,
                    p=weights,
                )
                selected_indices = np.concatenate((selected_indices, sampled))
            # sample up to n_frames, weight by predicted probability - only if some frames have nonzero prob
            elif (self.config.fill_mode == "weighted_prob") and (len(selected_indices) > 0):
                sample_from = frame_scores.loc[~frame_scores.index.isin(selected_indices)].index
                weights = frame_scores[sample_from] / np.sum(frame_scores[sample_from])
                sampled = rng.choice(
                    sample_from,
                    self.config.n_frames - len(selected_indices),
                    replace=False,
                    p=weights,
                )
                selected_indices = np.concatenate((selected_indices, sampled))
        # sort the selected images back into their original order
        if self.config.sort_by_time:
            selected_indices = sorted(selected_indices)
        return frames[selected_indices]
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_metrics.py | tests/test_metrics.py | import numpy as np
import pytest
from zamba.metrics import compute_species_specific_metrics
@pytest.fixture
def y_true():
    """Ground-truth labels as two mutually exclusive one-hot columns."""
    labels = np.array([1, 1, 1, 1, 0, 0, 0, 0])
    # column 0 is the complement of column 1
    return np.c_[1 - labels, labels]
@pytest.fixture
def y_pred():
    """Predicted labels as two mutually exclusive one-hot columns."""
    preds = np.array([0, 1, 1, 1, 1, 1, 1, 0])
    # column 0 is the complement of column 1
    return np.c_[1 - preds, preds]
def test_compute_species_specific_metrics(y_true, y_pred):
    """Per-class metrics are keyed by column index when no labels are given."""
    metrics = {}
    for metric_name, index, metric in compute_species_specific_metrics(y_true, y_pred):
        metrics[f"{metric_name}/{index}"] = metric
    expected = {
        "accuracy/0": 0.5,
        "f1/0": 0.3333333333333333,
        "precision/0": 0.5,  # Label `0` predicted twice, only once was actually `0`
        "recall/0": 0.25,  # Of four `0` labels, only one was predicted correctly
        "accuracy/1": 0.5,
        "f1/1": 0.6,
        "precision/1": 0.5,  # Label `1` predicted six times, only three were actually `1`
        "recall/1": 0.75,  # Of four `1` labels, only three were predicted correctly
    }
    assert metrics == expected
def test_compute_species_specific_metrics_with_labels(y_true, y_pred):
    """Per-class metrics are keyed by the provided label names."""
    metrics = {}
    for metric_name, index, metric in compute_species_specific_metrics(
        y_true, y_pred, labels=["frog", "caterpillar"]
    ):
        metrics[f"{metric_name}/{index}"] = metric
    expected = {
        "accuracy/frog": 0.5,
        "f1/frog": 0.3333333333333333,
        "precision/frog": 0.5,  # Label `0` predicted twice, only once was actually `0`
        "recall/frog": 0.25,  # Of four `0` labels, only one was predicted correctly
        "accuracy/caterpillar": 0.5,
        "f1/caterpillar": 0.6,
        "precision/caterpillar": 0.5,  # Label `1` predicted six times, only three were actually `1`
        "recall/caterpillar": 0.75,  # Of four `1` labels, only three were predicted correctly
    }
    assert metrics == expected
def test_compute_species_specific_metrics_wrong_number_of_labels(y_true, y_pred):
    """A label count that doesn't match the number of columns raises ValueError."""
    with pytest.raises(ValueError) as excinfo:
        list(
            compute_species_specific_metrics(
                y_true, y_pred, labels=["frog", "caterpillar", "squid"]
            )
        )
    expected_message = "The number of labels (3) must match the number of columns in y_true (2)."
    assert excinfo.value.args[0] == expected_message
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_depth.py | tests/test_depth.py | import appdirs
import numpy as np
import pandas as pd
from pathlib import Path
from pydantic import ValidationError
import pytest
from zamba.models.config import GPUS_AVAILABLE
from zamba.models.depth_estimation import DepthEstimationManager, DepthEstimationConfig
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
@pytest.fixture
def two_video_filepaths(tmp_path):
    """Write a two-row filepaths CSV: one video with known depth outputs, one without detections."""
    csv_path = tmp_path / "filepaths.csv"
    videos = [
        # video from depth estimation competition to verify actual preds
        str(ASSETS_DIR / "depth_tests" / "aava.mp4"),
        # test asset video with no detections
        str(TEST_VIDEOS_DIR / "data/raw/savanna/Grumeti_Tanzania/K38_check3/09190048_Hyena.AVI"),
    ]
    pd.DataFrame({"filepath": videos}).to_csv(csv_path)
    return csv_path
def test_prediction(two_video_filepaths):
    """End-to-end depth prediction over one reference video and one no-detection video."""
    dem = DepthEstimationManager(
        model_cache_dir=Path(appdirs.user_cache_dir()) / "zamba", gpus=GPUS_AVAILABLE
    )
    filepaths = pd.read_csv(two_video_filepaths).filepath.values
    preds = dem.predict(filepaths)
    # NB: we expect some small differences in number of detections across operating systems
    assert len(preds) >= 80
    assert preds.distance.notnull().sum() >= 40
    assert preds.filepath.nunique() == 2
    # predictions for reference video
    ref_vid_preds = preds[preds.filepath == filepaths[0]].set_index("time")
    # two animals found at time 30
    assert len(ref_vid_preds.loc[30]) == 2
    # confirm distance values (approximate compare to tolerate float differences)
    assert np.isclose(
        ref_vid_preds.loc[[30, 40, 50]].distance.values,
        [3.1, 3.1, 3.6, 3.6, 4.1],
    ).all()
    # check nan rows exist for video with no detection
    no_det_preds = preds[preds.filepath == filepaths[1]].set_index("time")
    assert len(no_det_preds) == 15
    assert no_det_preds.distance.isnull().all()
def test_duplicate_filepaths_are_ignored(tmp_path, two_video_filepaths):
    """DepthEstimationConfig deduplicates repeated filepaths."""
    original = pd.read_csv(two_video_filepaths)
    doubled = pd.concat([original, original])
    assert len(doubled) == 2 * len(original)
    config = DepthEstimationConfig(filepaths=doubled, save_to=tmp_path)
    assert len(config.filepaths) == len(original)
def test_save_dir_and_overwrite(tmp_path, two_video_filepaths):
    """An existing predictions file is rejected unless overwrite=True is passed."""
    # create empty pred file to force use of overwrite
    existing = tmp_path / "depth_estimation.csv"
    existing.touch()
    with pytest.raises(ValidationError):
        DepthEstimationConfig(filepaths=two_video_filepaths, save_to=existing)
    # this works if overwrite is passed
    config = DepthEstimationConfig(
        filepaths=two_video_filepaths, save_to=existing, overwrite=True
    )
    assert config.overwrite
def test_invalid_video_is_skipped(tmp_path):
    """An unreadable video is skipped, but an empty predictions file is still written."""
    bad_video = tmp_path / "invalid_vid.mp4"
    bad_video.touch()
    save_to = tmp_path / "preds.csv"
    config = DepthEstimationConfig(
        filepaths=pd.DataFrame({"filepath": [bad_video]}),
        save_to=save_to,
    )
    config.run_model()
    # outputs are written out but empty since the video could not be loaded
    preds = pd.read_csv(save_to)
    assert len(preds) == 0
    assert list(preds.columns) == ["filepath", "time", "distance"]
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_metadata.py | tests/test_metadata.py | import itertools
from string import ascii_letters
import numpy as np
import pandas as pd
from zamba.data.metadata import create_site_specific_splits, one_hot_to_labels
def test_site_specific_splits():
    """Sites of decreasing size are split 3:1:1 without splitting any single site."""
    # 20 sites labeled a..t with 20, 19, ..., 1 members respectively
    group = pd.Series(
        list(
            itertools.chain.from_iterable(
                [element] * n for element, n in zip(ascii_letters[:20], range(20, 0, -1))
            )
        )
    )
    group_counts = group.value_counts()
    split = create_site_specific_splits(group, {"train": 3, "val": 1, "holdout": 1})
    # NOTE(review): the iloc strides below imply sites (ranked by size) are assigned in a
    # repeating 5-site cycle (train, val, holdout, train, train) -- confirm against
    # create_site_specific_splits before changing these expectations.
    assert split.value_counts().to_dict() == {
        "train": group_counts.iloc[0::5].sum()
        + group_counts.iloc[3::5].sum()
        + group_counts.iloc[4::5].sum(),
        "val": group_counts.iloc[1::5].sum(),
        "holdout": group_counts.iloc[2::5].sum(),
    }
def test_site_specific_splits_with_nulls():
    """Rows with a null site still get split; non-null sites follow the 3:1:1 pattern."""
    # first 21 rows have a null site; remaining sites a..t shrink from 19 to 1 members
    group = pd.Series(
        list(
            itertools.chain.from_iterable(
                [element] * n
                for element, n in zip([None] + list(ascii_letters[:20]), range(20, 0, -1))
            )
        )
    )
    group_counts = group.value_counts()
    split = create_site_specific_splits(
        group, {"train": 3, "val": 1, "holdout": 1}, random_state=2345
    )
    # non-null sites: same stride pattern as the no-null test
    notnull_split = split[group.notnull()]
    assert notnull_split.value_counts().to_dict() == {
        "train": group_counts.iloc[0::5].sum()
        + group_counts.iloc[3::5].sum()
        + group_counts.iloc[4::5].sum(),
        "val": group_counts.iloc[1::5].sum(),
        "holdout": group_counts.iloc[2::5].sum(),
    }
    # null-site rows: counts below are tied to random_state=2345 (assignment appears
    # randomized for null sites -- hence the fixed seed)
    null_split = split[group.isnull()]
    null_split_group_counts = null_split.value_counts()
    assert null_split_group_counts.to_dict() == {"train": 9, "holdout": 7, "val": 4}
def test_one_hot_to_labels():
    """One-hot species columns convert to long-format (filepath, label) rows; rows with
    two species yield two rows, and non-species columns are ignored."""
    values = np.eye(10)  # All rows have at least one species
    values[:5, 5:] = np.eye(5)  # First five rows have two species
    one_hot = pd.DataFrame(
        values,
        columns=[f"species_{name}" for name in ascii_letters[:10]],
        index=[f"data/{i}" for i in range(10)],
    )
    # non-species column should not produce any labels
    one_hot["extra_column"] = 1
    assert one_hot_to_labels(one_hot).to_dict(orient="records") == [
        {"filepath": "data/0", "label": "a"},
        {"filepath": "data/0", "label": "f"},
        {"filepath": "data/1", "label": "b"},
        {"filepath": "data/1", "label": "g"},
        {"filepath": "data/2", "label": "c"},
        {"filepath": "data/2", "label": "h"},
        {"filepath": "data/3", "label": "d"},
        {"filepath": "data/3", "label": "i"},
        {"filepath": "data/4", "label": "e"},
        {"filepath": "data/4", "label": "j"},
        {"filepath": "data/5", "label": "f"},
        {"filepath": "data/6", "label": "g"},
        {"filepath": "data/7", "label": "h"},
        {"filepath": "data/8", "label": "i"},
        {"filepath": "data/9", "label": "j"},
    ]
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_densepose.py | tests/test_densepose.py | import os
import pytest
from pydantic import ValidationError
from zamba.data.video import VideoLoaderConfig
from zamba.models.densepose import DensePoseManager, DensePoseConfig
from zamba.models.densepose.densepose_manager import MODELS
from conftest import ASSETS_DIR
@pytest.fixture
def chimp_video_path():
    """Path to the chimp test video asset."""
    return ASSETS_DIR.joinpath("densepose_tests", "chimp.mp4")
@pytest.fixture
def chimp_image_path():
    """Path to the chimp test image asset."""
    return ASSETS_DIR.joinpath("densepose_tests", "chimp.jpg")
@pytest.mark.skipif(
    not bool(int(os.environ.get("ZAMBA_RUN_DENSEPOSE_TESTS", 0))),
    reason="""Skip the densepose specific tests unless environment variable \
ZAMBA_RUN_DENSEPOSE_TESTS is set to 1.""",
)
@pytest.mark.parametrize("model", ("animals", "chimps"))
def test_image(model, chimp_image_path, tmp_path):
    """Run densepose on an image: predict, serialize, visualize, and (chimps) anatomize."""
    dpm = DensePoseManager(model=MODELS[model])
    # segmentation
    image, preds = dpm.predict_image(chimp_image_path)
    assert image.shape == (427, 640, 3)
    assert len(preds) > 0
    # serialize results
    serialized = dpm.serialize_image_output(
        preds, filename=tmp_path / f"output_{model}.json", write_embeddings=False
    )
    deserialized = dpm.deserialize_output(filename=tmp_path / f"output_{model}.json")
    assert serialized is not None
    assert (tmp_path / f"output_{model}.json").stat().st_size > 0
    assert len(deserialized) == len(preds)
    # visualize image
    visualized_img_arr = dpm.visualize_image(
        image, preds, output_path=(tmp_path / f"viz_image_{model}.jpg")
    )
    assert (tmp_path / f"viz_image_{model}.jpg").stat().st_size > 0
    assert visualized_img_arr.shape == image.shape
    # BUG FIX: previously compared against `image.shape` (a tuple), which made this
    # assertion trivially true; compare against the original pixels (as test_video
    # does) to verify the visualization actually modified the image.
    assert (visualized_img_arr != image).any()
    # anatomy
    if model == "chimps":
        anatomy_info = dpm.anatomize_image(
            visualized_img_arr, preds, output_path=(tmp_path / f"anatomized_{model}.csv")
        )
        # output to disk
        assert anatomy_info.shape in [
            (2, 44),
            (1, 44),
        ]  # depends on number of chimps identified; varies by version
        assert (anatomy_info > 0).any().any()
        assert (tmp_path / f"anatomized_{model}.csv").stat().st_size > 0
@pytest.mark.skipif(
    not bool(int(os.environ.get("ZAMBA_RUN_DENSEPOSE_TESTS", 0))),
    reason="""Skip the densepose specific tests unless environment variable \
ZAMBA_RUN_DENSEPOSE_TESTS is set to 1.""",
)
@pytest.mark.parametrize("model", ("animals", "chimps"))
def test_video(model, chimp_video_path, tmp_path):
    """Run densepose on a video: predict, serialize, visualize, and (chimps) anatomize."""
    dpm = DensePoseManager(model=MODELS[model])
    # segmentation (sample at 0.2 fps -> 3 frames from the test clip)
    vid, preds = dpm.predict_video(
        chimp_video_path, video_loader_config=VideoLoaderConfig(fps=0.2)
    )
    assert vid.shape == (3, 180, 320, 3)
    assert len(preds) > 0
    # serialize results
    serialized = dpm.serialize_video_output(
        preds, filename=tmp_path / f"output_{model}.json", write_embeddings=False
    )
    deserialized = dpm.deserialize_output(filename=tmp_path / f"output_{model}.json")
    assert serialized is not None
    assert (tmp_path / f"output_{model}.json").stat().st_size > 0
    assert len(deserialized) == len(preds)
    # visualize video
    visualized_vid_arr = dpm.visualize_video(
        vid, preds, output_path=(tmp_path / f"viz_vid_{model}.mp4")
    )
    assert (tmp_path / f"viz_vid_{model}.mp4").stat().st_size > 0
    assert visualized_vid_arr.shape == vid.shape
    assert (visualized_vid_arr != vid).any()
    # anatomy
    if model == "chimps":
        anatomy_info = dpm.anatomize_video(
            visualized_vid_arr, preds, output_path=(tmp_path / f"anatomized_{model}.csv")
        )
        # output to disk
        assert anatomy_info.shape[0] in [
            8,
            9,
            10,
        ]  # depends on number of chimps identified; varies by version
        assert anatomy_info.shape[1] == 46
        assert (anatomy_info > 0).any().any()
        assert (tmp_path / f"anatomized_{model}.csv").stat().st_size > 0
@pytest.mark.skipif(
    not bool(int(os.environ.get("ZAMBA_RUN_DENSEPOSE_TESTS", 0))),
    reason="""Skip the densepose specific tests unless environment variable \
ZAMBA_RUN_DENSEPOSE_TESTS is set to 1.""",
)
@pytest.mark.parametrize("model", ("animals", "chimps"))
def test_denseposeconfig(model, tmp_path):
    """DensePoseConfig rejects bad output_type and run_model writes expected artifacts."""
    # validation failures
    with pytest.raises(ValidationError):
        DensePoseConfig(
            video_loader_config=VideoLoaderConfig(fps=0.2),
            output_type="bananas",
            render_output=True,
            embeddings_in_json=False,
            data_dir=ASSETS_DIR / "densepose_tests",
            save_dir=tmp_path,
        )
    dpc = DensePoseConfig(
        video_loader_config=VideoLoaderConfig(fps=0.2),
        output_type="segmentation" if model == "animals" else "chimp_anatomy",
        render_output=True,
        embeddings_in_json=False,
        data_dir=ASSETS_DIR / "densepose_tests",
        save_dir=tmp_path,
    )
    dpc.run_model()
    # ensure all outputs are saved in save_dir
    # NOTE: "denspose" (sic) matches the filenames run_model actually writes.
    assert (tmp_path / "chimp_denspose_video.mp4").exists()
    assert (tmp_path / "chimp_denspose_labels.json").exists()
    # BUG FIX: this previously checked `model == "chimp_anatomy"`, which never matches
    # the parametrized values ("animals"/"chimps"), so the anatomy CSV was never
    # verified; "chimp_anatomy" is the output_type used when model == "chimps".
    if model == "chimps":
        assert (tmp_path / "chimp_denspose_anatomy.csv").exists()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_config.py | tests/test_config.py | from pathlib import Path
import pytest
import appdirs
import numpy as np
import pandas as pd
from pydantic import ValidationError
from zamba.models.config import (
EarlyStoppingConfig,
ModelConfig,
PredictConfig,
SchedulerConfig,
TrainConfig,
)
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
@pytest.fixture()
def mock_download_weights(mocker):
    """Patch weight downloading to return a dummy checkpoint path."""
    patched = mocker.patch("zamba.models.config.download_weights")
    patched.return_value = "dummy_model_checkpoint.ckpt"
    return patched
@pytest.fixture()
def mock_model_species(mocker):
    """Patch the model species lookup to return a fixed two-species list."""
    patched = mocker.patch("zamba.models.config.get_model_species")
    patched.return_value = ["elephant", "gorilla"]
    return patched
def test_train_data_dir_only():
    """TrainConfig with only a data_dir fails validation because labels is required."""
    with pytest.raises(ValidationError) as excinfo:
        TrainConfig(data_dir=TEST_VIDEOS_DIR)
    # labels is missing
    expected_errors = [
        {"loc": ("labels",), "msg": "field required", "type": "value_error.missing"}
    ]
    assert excinfo.value.errors() == expected_errors
def test_train_data_dir_and_labels(tmp_path, labels_relative_path, labels_absolute_path):
# correct data dir
config = TrainConfig(
data_dir=TEST_VIDEOS_DIR, labels=labels_relative_path, save_dir=tmp_path / "my_model"
)
assert config.data_dir is not None
assert config.labels is not None
# data dir ignored if absolute path provided in filepath
config = TrainConfig(
data_dir=tmp_path, labels=labels_absolute_path, save_dir=tmp_path / "my_model"
)
assert config.data_dir is not None
assert config.labels is not None
assert not config.labels.filepath.str.startswith(str(tmp_path)).any()
# incorrect data dir with relative filepaths
with pytest.raises(ValidationError) as error:
TrainConfig(
data_dir=ASSETS_DIR, labels=labels_relative_path, save_dir=tmp_path / "my_model"
)
assert "None of the video filepaths exist" in error.value.errors()[0]["msg"]
def test_train_labels_only(labels_absolute_path, tmp_path):
config = TrainConfig(labels=labels_absolute_path, save_dir=tmp_path / "my_model")
assert config.labels is not None
def test_predict_data_dir_only():
config = PredictConfig(data_dir=TEST_VIDEOS_DIR)
assert config.data_dir == TEST_VIDEOS_DIR
assert isinstance(config.filepaths, pd.DataFrame)
assert sorted(config.filepaths.filepath.values) == sorted(
[str(f) for f in TEST_VIDEOS_DIR.rglob("*") if f.is_file()]
)
assert config.filepaths.columns == ["filepath"]
def test_predict_data_dir_and_filepaths(labels_absolute_path, labels_relative_path):
# correct data dir
config = PredictConfig(data_dir=TEST_VIDEOS_DIR, filepaths=labels_relative_path)
assert config.data_dir is not None
assert config.filepaths is not None
assert config.filepaths.filepath.str.startswith(str(TEST_VIDEOS_DIR)).all()
# incorrect data dir
with pytest.raises(ValidationError) as error:
PredictConfig(data_dir=ASSETS_DIR, filepaths=labels_relative_path)
assert "None of the video filepaths exist" in error.value.errors()[0]["msg"]
def test_predict_filepaths_only(labels_absolute_path):
config = PredictConfig(filepaths=labels_absolute_path)
assert config.filepaths is not None
def test_filepath_column(tmp_path, labels_absolute_path):
pd.read_csv(labels_absolute_path).rename(columns={"filepath": "video"}).to_csv(
tmp_path / "bad_filepath_column.csv"
)
# predict: filepaths
with pytest.raises(ValidationError) as error:
PredictConfig(filepaths=tmp_path / "bad_filepath_column.csv")
assert "must contain a `filepath` column" in error.value.errors()[0]["msg"]
# train: labels
with pytest.raises(ValidationError) as error:
TrainConfig(labels=tmp_path / "bad_filepath_column.csv", save_dir=tmp_path / "my_model")
assert "must contain `filepath` and `label` columns" in error.value.errors()[0]["msg"]
def test_label_column(tmp_path, labels_absolute_path):
pd.read_csv(labels_absolute_path).rename(columns={"label": "animal"}).to_csv(
tmp_path / "bad_label_column.csv"
)
with pytest.raises(ValidationError) as error:
TrainConfig(labels=tmp_path / "bad_label_column.csv", save_dir=tmp_path / "my_model")
assert "must contain `filepath` and `label` columns" in error.value.errors()[0]["msg"]
def test_extra_column(tmp_path, labels_absolute_path):
# add extra column that has species_ prefix
df = pd.read_csv(labels_absolute_path)
df["species_VE"] = "duiker"
df.to_csv(
tmp_path / "extra_species_col.csv",
index=False,
)
# this column is not one hot encoded
config = TrainConfig(
labels=tmp_path / "extra_species_col.csv",
save_dir=tmp_path / "my_model",
use_default_model_labels=False,
)
assert list(config.labels.columns) == [
"filepath",
"split",
"species_antelope_duiker",
"species_elephant",
"species_gorilla",
]
# extra columns are excluded in predict config
config = PredictConfig(filepaths=tmp_path / "extra_species_col.csv")
assert config.filepaths.columns == ["filepath"]
def test_one_video_does_not_exist(tmp_path, labels_absolute_path, caplog):
files_df = pd.read_csv(labels_absolute_path)
# add a fake file
files_df = pd.concat(
[
files_df,
pd.DataFrame(
{"filepath": "fake_file.mp4", "label": "gorilla", "split": "train"}, index=[0]
),
],
ignore_index=True,
)
files_df.to_csv(tmp_path / "labels_with_fake_video.csv")
config = PredictConfig(filepaths=tmp_path / "labels_with_fake_video.csv")
assert "Skipping 1 file(s) that could not be found" in caplog.text
# one fewer file than in original list since bad file is skipped
assert len(config.filepaths) == (len(files_df) - 1)
config = TrainConfig(
labels=tmp_path / "labels_with_fake_video.csv", save_dir=tmp_path / "my_model"
)
assert "Skipping 1 file(s) that could not be found" in caplog.text
assert len(config.labels) == (len(files_df) - 1)
def test_videos_cannot_be_loaded(tmp_path, labels_absolute_path, caplog):
files_df = pd.read_csv(labels_absolute_path)
# create bad files
for i in np.arange(2):
bad_file = tmp_path / f"bad_file_{i}.mp4"
bad_file.touch()
files_df = pd.concat(
[
files_df,
pd.DataFrame(
{"filepath": bad_file, "label": "gorilla", "split": "train"}, index=[0]
),
],
ignore_index=True,
)
files_df.to_csv(tmp_path / "labels_with_non_loadable_videos.csv")
config = PredictConfig(filepaths=tmp_path / "labels_with_non_loadable_videos.csv")
assert "Skipping 2 file(s) that could not be loaded with ffmpeg" in caplog.text
assert len(config.filepaths) == (len(files_df) - 2)
config = TrainConfig(
labels=tmp_path / "labels_with_non_loadable_videos.csv", save_dir=tmp_path / "my_model"
)
assert "Skipping 2 file(s) that could not be loaded with ffmpeg" in caplog.text
assert len(config.labels) == (len(files_df) - 2)
def test_empty_model_config():
with pytest.raises(ValueError) as error:
ModelConfig()
assert (
"Must provide either `train_config` or `predict_config`" in error.value.errors()[0]["msg"]
)
def test_early_stopping_mode():
assert EarlyStoppingConfig(monitor="val_macro_f1").mode == "max"
assert EarlyStoppingConfig(monitor="val_loss").mode == "min"
with pytest.raises(ValidationError) as error:
# if you really want to do the wrong thing you have to be explicit about it
EarlyStoppingConfig(monitor="val_loss", mode="max")
assert "Provided mode max is incorrect for val_loss monitor." == error.value.errors()[0]["msg"]
def test_labels_with_all_null_species(labels_absolute_path, tmp_path):
labels = pd.read_csv(labels_absolute_path)
labels["label"] = np.nan
with pytest.raises(ValueError) as error:
TrainConfig(labels=labels, save_dir=tmp_path / "my_model")
assert "Species cannot be null for all videos." == error.value.errors()[0]["msg"]
def test_labels_with_partially_null_species(
labels_absolute_path, caplog, tmp_path, mock_download_weights, mock_model_species
):
labels = pd.read_csv(labels_absolute_path)
labels.loc[0, "label"] = np.nan
TrainConfig(labels=labels, save_dir=tmp_path / "my_model")
assert "Found 1 filepath(s) with no label. Will skip." in caplog.text
def test_binary_labels_no_blank(
labels_absolute_path, tmp_path, mock_download_weights, mock_model_species
):
labels = pd.read_csv(labels_absolute_path)
labels["label"] = np.where(labels.label != "antelope_duiker", "something_else", labels.label)
assert labels.label.nunique() == 2
# get processed labels dataframe
labels_df = TrainConfig(labels=labels, save_dir=tmp_path / "my_model").labels
# only one label column because this is binary
# first column alphabetically is kept when blank is not present
assert labels_df.filter(regex="species_").columns == ["species_antelope_duiker"]
def test_binary_labels_with_blank(
labels_absolute_path, tmp_path, mock_download_weights, mock_model_species
):
labels = pd.read_csv(labels_absolute_path)
labels["label"] = np.where(labels.label != "antelope_duiker", "Blank", labels.label)
labels_df = TrainConfig(labels=labels, save_dir=tmp_path / "my_model").labels
# blank is the kept column (regardless of case)
assert labels_df.filter(regex="species_").columns == ["species_blank"]
def test_labels_with_all_null_split(
labels_absolute_path, caplog, tmp_path, mock_download_weights, mock_model_species
):
labels = pd.read_csv(labels_absolute_path)
labels["split"] = np.nan
TrainConfig(labels=labels, save_dir=tmp_path / "my_model")
assert "Split column is entirely null. Will generate splits automatically" in caplog.text
def test_labels_with_partially_null_split(
labels_absolute_path, tmp_path, mock_download_weights, mock_model_species
):
labels = pd.read_csv(labels_absolute_path)
labels.loc[0, "split"] = np.nan
with pytest.raises(ValueError) as error:
TrainConfig(labels=labels, save_dir=tmp_path / "my_model")
assert (
"Found 1 row(s) with null `split`. Fill in these rows with either `train`, `val`, or `holdout`"
) in error.value.errors()[0]["msg"]
def test_labels_with_invalid_split(
labels_absolute_path, tmp_path, mock_download_weights, mock_model_species
):
labels = pd.read_csv(labels_absolute_path)
labels.loc[0, "split"] = "test"
with pytest.raises(ValueError) as error:
TrainConfig(labels=labels, save_dir=tmp_path / "my_model")
assert (
"Found the following invalid values for `split`: {'test'}. `split` can only contain `train`, `val`, or `holdout.`"
) == error.value.errors()[0]["msg"]
def test_labels_no_splits(labels_no_splits, tmp_path, mock_download_weights, mock_model_species):
# ensure species are allocated to both sets
labels_four_videos = pd.read_csv(labels_no_splits).head(4)
labels_four_videos["label"] = ["gorilla"] * 2 + ["elephant"] * 2
_ = TrainConfig(
data_dir=TEST_VIDEOS_DIR,
labels=labels_four_videos,
save_dir=tmp_path,
split_proportions=dict(train=1, val=1, holdout=0),
)
split = pd.read_csv(tmp_path / "splits.csv")["split"].values
assert (split == ["train", "val", "train", "val"]).all()
# remove the first row which puts antelope_duiker at 2 instead of 3
labels_with_too_few_videos = pd.read_csv(labels_no_splits).iloc[1:, :]
with pytest.raises(ValueError) as error:
TrainConfig(
data_dir=TEST_VIDEOS_DIR,
labels=labels_with_too_few_videos,
save_dir=tmp_path,
)
assert (
"Not all species have enough media files to allocate into the following splits: train, val, holdout. A minimum of 3 media files per label is required. Found the following counts: {'antelope_duiker': 2}. Either remove these labels or add more images/videos."
) == error.value.errors()[0]["msg"]
def test_labels_split_proportions(
labels_no_splits, tmp_path, mock_download_weights, mock_model_species
):
config = TrainConfig(
data_dir=TEST_VIDEOS_DIR,
labels=labels_no_splits,
split_proportions={"a": 3, "b": 1},
save_dir=tmp_path,
)
assert config.labels.split.value_counts().to_dict() == {"a": 13, "b": 6}
def test_from_scratch(labels_absolute_path, tmp_path, mock_download_weights, mock_model_species):
config = TrainConfig(
labels=labels_absolute_path,
from_scratch=True,
checkpoint=None,
save_dir=tmp_path / "my_model",
)
assert config.model_name == "time_distributed"
assert config.from_scratch
assert config.checkpoint is None
with pytest.raises(ValueError) as error:
TrainConfig(
labels=labels_absolute_path,
from_scratch=True,
model_name=None,
save_dir=tmp_path / "my_model",
)
assert "If from_scratch=True, model_name cannot be None." == error.value.errors()[0]["msg"]
def test_predict_dry_run_and_save(labels_absolute_path, caplog, tmp_path):
config = PredictConfig(filepaths=labels_absolute_path, dry_run=True, save=True)
assert (
"Cannot save when predicting with dry_run=True. Setting save=False and save_dir=None."
in caplog.text
)
assert not config.save
assert config.save_dir is None
config = PredictConfig(filepaths=labels_absolute_path, dry_run=True, save_dir=tmp_path)
assert not config.save
assert config.save_dir is None
def test_predict_filepaths_with_duplicates(labels_absolute_path, tmp_path, caplog):
filepaths = pd.read_csv(labels_absolute_path, usecols=["filepath"])
# add duplicate filepath
pd.concat([filepaths, filepaths.loc[[0]]], ignore_index=True).to_csv(
tmp_path / "filepaths_with_dupe.csv"
)
PredictConfig(filepaths=tmp_path / "filepaths_with_dupe.csv")
assert "Found 1 duplicate row(s) in filepaths csv. Dropping duplicates" in caplog.text
def test_model_cache_dir(
labels_absolute_path, tmp_path, mock_download_weights, mock_model_species, monkeypatch
):
monkeypatch.delenv("MODEL_CACHE_DIR", raising=False)
config = TrainConfig(labels=labels_absolute_path, save_dir=tmp_path / "my_model")
assert config.model_cache_dir == Path(appdirs.user_cache_dir()) / "zamba"
monkeypatch.setenv("MODEL_CACHE_DIR", str(tmp_path))
config = TrainConfig(labels=labels_absolute_path, save_dir=tmp_path / "my_model")
assert config.model_cache_dir == tmp_path
config = PredictConfig(filepaths=labels_absolute_path, model_cache_dir=tmp_path / "my_cache")
assert config.model_cache_dir == tmp_path / "my_cache"
def test_predict_save(labels_absolute_path, tmp_path, dummy_trained_model_checkpoint):
# if save is True, save in current working directory
config = PredictConfig(filepaths=labels_absolute_path, skip_load_validation=True)
assert config.save_dir == Path.cwd()
config = PredictConfig(filepaths=labels_absolute_path, save=False, skip_load_validation=True)
assert config.save is False
assert config.save_dir is None
# if save_dir is specified, set save to True
config = PredictConfig(
filepaths=labels_absolute_path,
save=False,
save_dir=tmp_path / "my_dir",
skip_load_validation=True,
)
assert config.save is True
# save dir gets created
assert (tmp_path / "my_dir").exists()
# empty save dir does not error
save_dir = tmp_path / "save_dir"
save_dir.mkdir()
config = PredictConfig(
filepaths=labels_absolute_path, save_dir=save_dir, skip_load_validation=True
)
assert config.save_dir == save_dir
# save dir with prediction csv or yaml will error
for pred_file in [
(save_dir / "zamba_predictions.csv"),
(save_dir / "predict_configuration.yaml"),
]:
# just takes one of the two files to raise error
pred_file.touch()
with pytest.raises(ValueError) as error:
PredictConfig(
filepaths=labels_absolute_path, save_dir=save_dir, skip_load_validation=True
)
assert (
f"zamba_predictions.csv and/or predict_configuration.yaml already exist in {save_dir}. If you would like to overwrite, set overwrite=True"
== error.value.errors()[0]["msg"]
)
pred_file.unlink()
# can overwrite
pred_file.touch()
config = PredictConfig(
filepaths=labels_absolute_path,
save_dir=save_dir,
skip_load_validation=True,
overwrite=True,
)
assert config.save_dir == save_dir
def test_validate_scheduler(labels_absolute_path, tmp_path):
# None gets transformed into SchedulerConfig
config = TrainConfig(
labels=labels_absolute_path,
scheduler_config=None,
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
assert config.scheduler_config == SchedulerConfig(scheduler=None, scheduler_params=None)
# default is valid
config = TrainConfig(
labels=labels_absolute_path,
scheduler_config="default",
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
assert config.scheduler_config == "default"
# other strings are not
with pytest.raises(ValueError) as error:
TrainConfig(
labels=labels_absolute_path,
scheduler_config="StepLR",
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
assert (
"Scheduler can either be 'default', None, or a SchedulerConfig."
== error.value.errors()[0]["msg"]
)
# custom scheduler
config = TrainConfig(
labels=labels_absolute_path,
scheduler_config=SchedulerConfig(scheduler="StepLR", scheduler_params={"gamma": 0.2}),
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
assert config.scheduler_config == SchedulerConfig(
scheduler="StepLR", scheduler_params={"gamma": 0.2}
)
def test_dry_run_and_skip_load_validation(labels_absolute_path, caplog, tmp_path):
# check dry_run is True sets skip_load_validation to True
config = TrainConfig(
labels=labels_absolute_path,
dry_run=True,
skip_load_validation=False,
save_dir=tmp_path / "my_model",
)
assert config.skip_load_validation
assert "Turning off video loading check since dry_run=True." in caplog.text
# if dry run is False, skip_load_validation is unchanged
config = TrainConfig(
labels=labels_absolute_path,
dry_run=False,
skip_load_validation=False,
save_dir=tmp_path / "my_model",
)
assert not config.skip_load_validation
def test_default_video_loader_config(labels_absolute_path, tmp_path):
# if no video loader is specified, use default for model
config = ModelConfig(
train_config=TrainConfig(
labels=labels_absolute_path, skip_load_validation=True, save_dir=tmp_path / "my_model"
),
video_loader_config=None,
)
assert config.video_loader_config is not None
config = ModelConfig(
predict_config=PredictConfig(filepaths=labels_absolute_path, skip_load_validation=True),
video_loader_config=None,
)
assert config.video_loader_config is not None
def test_checkpoint_sets_model_to_default(
labels_absolute_path, dummy_trained_model_checkpoint, tmp_path
):
config = TrainConfig(
labels=labels_absolute_path,
checkpoint=dummy_trained_model_checkpoint,
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
assert config.model_name == "dummy_model"
config = PredictConfig(
filepaths=labels_absolute_path,
checkpoint=dummy_trained_model_checkpoint,
skip_load_validation=True,
)
assert config.model_name == "dummy_model"
def test_validate_provided_species_and_use_default_model_labels(labels_absolute_path, tmp_path):
# labels are subset of time distributed model
config = TrainConfig(
labels=labels_absolute_path,
model_name="time_distributed",
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
# because this is a subset, use_default_model_labels gets set to True
assert config.use_default_model_labels
# because this is a subset, I can choose to set it to False
config = TrainConfig(
labels=labels_absolute_path,
model_name="time_distributed",
skip_load_validation=True,
save_dir=tmp_path / "my_model",
use_default_model_labels=False,
)
assert config.use_default_model_labels is False
# create labels with a species that is not in the model
alien_labels = pd.read_csv(labels_absolute_path)
alien_labels["label"] = "alien"
config = TrainConfig(
labels=alien_labels,
model_name="time_distributed",
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
# by default this gets set to False
assert config.use_default_model_labels is False
# if I try to set this to True, it will error
with pytest.raises(ValueError) as error:
TrainConfig(
labels=alien_labels,
model_name="time_distributed",
skip_load_validation=True,
save_dir=tmp_path / "my_model",
use_default_model_labels=True,
)
assert (
"Conflicting information between `use_default_model_labels=True` and species provided."
in error
)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_datamodule.py | tests/test_datamodule.py | import itertools
from zamba.pytorch.dataloaders import get_datasets
from zamba.pytorch_lightning.video_modules import ZambaVideoDataModule
def test_get_datasets_train_metadata(train_metadata):
train_dataset, val_dataset, test_dataset, predict_dataset = get_datasets(
train_metadata=train_metadata,
)
for video, label in itertools.chain(train_dataset, val_dataset, test_dataset):
assert video.ndim == 4
assert label.sum() == 1
assert predict_dataset is None
def test_get_datasets_predict_metadata(predict_metadata):
train_dataset, val_dataset, test_dataset, predict_dataset = get_datasets(
predict_metadata=predict_metadata,
)
for video, label in predict_dataset:
assert video.ndim == 4
assert label.sum() == 0
assert train_dataset is None
assert val_dataset is None
assert test_dataset is None
def test_get_datasets_train_and_predict_metadata(train_metadata, predict_metadata):
train_dataset, val_dataset, test_dataset, predict_dataset = get_datasets(
train_metadata=train_metadata,
predict_metadata=predict_metadata,
)
for video, label in itertools.chain(train_dataset, val_dataset, test_dataset):
assert video.ndim == 4
assert label.sum() == 1
for video, label in predict_dataset:
assert video.ndim == 4
assert label.sum() == 0
def test_zamba_data_module_train(train_metadata):
data_module = ZambaVideoDataModule(train_metadata=train_metadata)
for videos, labels in data_module.train_dataloader():
assert videos.ndim == 5
assert labels.sum() == 1
def test_zamba_data_module_train_and_predict(train_metadata, predict_metadata):
data_module = ZambaVideoDataModule(
train_metadata=train_metadata,
predict_metadata=predict_metadata,
)
for videos, labels in data_module.train_dataloader():
assert videos.ndim == 5
assert labels.sum() == 1
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_model_manager.py | tests/test_model_manager.py | import json
from pathlib import Path
from botocore.exceptions import ClientError
import pytest
import torch
import yaml
from zamba.models.utils import download_weights, get_model_checkpoint_filename
from zamba.models.model_manager import train_model
from conftest import DummyTrainConfig, TEST_VIDEOS_DIR, labels_n_classes_df
def test_model_manager(dummy_trainer):
assert (dummy_trainer.model.model[2].weight == 1).all()
assert not (dummy_trainer.model.model[3].weight == 0).all()
def test_no_early_stopping(
labels_absolute_path, tmp_path, dummy_checkpoint, dummy_video_loader_config
):
config = DummyTrainConfig(
labels=labels_absolute_path,
data_dir=TEST_VIDEOS_DIR,
checkpoint=dummy_checkpoint,
early_stopping_config=None,
save_dir=tmp_path / "my_model",
num_workers=1,
)
train_model(train_config=config, video_loader_config=dummy_video_loader_config)
def test_save_checkpoint(dummy_trained_model_checkpoint):
checkpoint = torch.load(dummy_trained_model_checkpoint, weights_only=False)
assert tuple(checkpoint["state_dict"].keys()) == (
"backbone.weight",
"backbone.bias",
"head.weight",
"head.bias",
"model.2.weight",
"model.2.bias",
"model.3.weight",
"model.3.bias",
)
assert checkpoint["hyper_parameters"] == {
"lr": 0.001,
"model_class": "DummyZambaVideoClassificationLightningModule",
"num_frames": 4,
"num_hidden": 1,
"scheduler": None,
"scheduler_params": None,
"species": ["antelope_duiker", "elephant", "gorilla"],
}
@pytest.mark.parametrize("split", ("test", "val"))
def test_save_metrics(dummy_trainer, split):
metric_names = {
"val_loss",
f"{split}_macro_f1",
f"{split}_top_1_accuracy",
f"species/{split}_accuracy/antelope_duiker",
f"species/{split}_f1/antelope_duiker",
f"species/{split}_precision/antelope_duiker",
f"species/{split}_recall/antelope_duiker",
f"species/{split}_accuracy/elephant",
f"species/{split}_f1/elephant",
f"species/{split}_precision/elephant",
f"species/{split}_recall/elephant",
f"species/{split}_accuracy/gorilla",
f"species/{split}_f1/gorilla",
f"species/{split}_precision/gorilla",
f"species/{split}_recall/gorilla",
}
with (Path(dummy_trainer.logger.log_dir) / f"{split}_metrics.json").open() as fp:
metrics = json.load(fp)
assert metrics.keys() == metric_names
@pytest.mark.parametrize("split", ("test", "val"))
def test_save_metrics_less_than_two_classes(
dummy_video_loader_config, split, dummy_checkpoint, tmp_path
):
labels = labels_n_classes_df(2)
trainer = train_model(
train_config=DummyTrainConfig(
labels=labels,
data_dir=TEST_VIDEOS_DIR,
checkpoint=dummy_checkpoint,
num_workers=2,
save_dir=tmp_path / "my_model",
),
video_loader_config=dummy_video_loader_config,
)
with (Path(trainer.logger.log_dir) / f"{split}_metrics.json").open() as fp:
metrics = json.load(fp)
metric_names = {
"val_loss",
f"{split}_macro_f1",
f"{split}_accuracy",
}
for c in labels.label.str.lower().unique():
metric_names = metric_names.union(
{
f"species/{split}_accuracy/{c}",
f"species/{split}_f1/{c}",
f"species/{split}_precision/{c}",
f"species/{split}_recall/{c}",
}
)
removed_in_binary_case = {
"species/test_precision/b",
"species/test_recall/b",
"species/test_accuracy/b",
"species/test_f1/b",
"species/val_precision/b",
"species/val_recall/b",
"species/val_accuracy/b",
"species/val_f1/b",
}
assert metrics.keys() == metric_names - removed_in_binary_case
def test_save_configuration(dummy_trainer):
with (Path(dummy_trainer.logger.log_dir) / "train_configuration.yaml").open() as fp:
config = yaml.safe_load(fp.read())
assert set(config.keys()) == {
"git_hash",
"model_class",
"species",
"starting_learning_rate",
"train_config",
"training_start_time",
"video_loader_config",
}
def test_train_save_dir(dummy_trainer):
assert Path(dummy_trainer.logger.root_dir).name == "my_model"
assert Path(dummy_trainer.logger.log_dir).name == "version_0"
def test_train_save_dir_overwrite(
labels_absolute_path, dummy_checkpoint, tmp_path, dummy_video_loader_config
):
config = DummyTrainConfig(
labels=labels_absolute_path,
data_dir=TEST_VIDEOS_DIR,
checkpoint=dummy_checkpoint,
save_dir=tmp_path / "my_model",
overwrite=True,
num_workers=1,
)
overwrite_trainer = train_model(
train_config=config, video_loader_config=dummy_video_loader_config
)
assert Path(overwrite_trainer.logger.log_dir).resolve() == config.save_dir.resolve()
assert not any([f.name.startswith("version_") for f in config.save_dir.iterdir()])
for f in [
"train_configuration.yaml",
"test_metrics.json",
"val_metrics.json",
"dummy_model.ckpt",
]:
assert (config.save_dir / f).exists()
@pytest.mark.parametrize(
"model_name", ["time_distributed", "slowfast", "european", "blank_nonblank"]
)
@pytest.mark.parametrize("weight_region", ["us", "asia", "eu"])
def test_download_weights(model_name, weight_region, tmp_path):
public_weights = get_model_checkpoint_filename(model_name)
ckpt_path = download_weights(
filename=public_weights,
weight_region=weight_region,
destination_dir=tmp_path,
)
# ensure download happened
assert Path(ckpt_path).exists()
# ensure path is correct
assert Path(ckpt_path) == tmp_path / public_weights
# invalid filename
with pytest.raises(ClientError):
download_weights(
filename="incorrect_checkpoint.ckpt", destination_dir=tmp_path, weight_region="us"
)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_image_file_handling.py | tests/test_image_file_handling.py | """Tests for filepath handling in the image code path"""
import json
from pathlib import Path
import shutil
import pandas as pd
import pytest
import torch
from typer.testing import CliRunner
from zamba.image_cli import app as image_app
from zamba.images.data import ImageClassificationDataModule
from conftest import ASSETS_DIR
runner = CliRunner()
TEST_IMAGES_DIR = ASSETS_DIR / "images"
ENA24_SMALL_DIR = TEST_IMAGES_DIR / "ena24_small"
def create_csv_with_relative_paths(files, output_path):
"""Create a CSV file with relative paths to the given files."""
pd.DataFrame({"filepath": [f.name for f in files]}).to_csv(output_path, index=False)
def create_csv_with_absolute_paths(files, output_path):
"""Create a CSV file with absolute paths to the given files."""
pd.DataFrame({"filepath": [str(f.absolute()) for f in files]}).to_csv(output_path, index=False)
def create_csv_with_mixed_paths(files, output_path):
"""Create a CSV file with a mix of relative and absolute paths."""
filepaths = []
for i, f in enumerate(files):
if i % 2 == 0:
filepaths.append(f.name) # Relative path
else:
filepaths.append(str(f.absolute())) # Absolute path
pd.DataFrame({"filepath": filepaths}).to_csv(output_path, index=False)
def create_labels_csv(files, labels_df, output_path):
"""Create a labels CSV file with the given files and labels."""
# Extract just the filename part to match with the labels dataframe
filenames = [f.name for f in files]
# Create a new dataframe with just the files we're using
filtered_labels = labels_df[labels_df["filepath"].isin(filenames)].copy()
# Update the filepath column with the appropriate path format
filtered_labels["filepath"] = filenames
filtered_labels.to_csv(output_path, index=False)
@pytest.fixture
def ena24_dataset_setup(tmp_path):
"""Set up a temporary directory with the ena24_small dataset."""
# Create a data directory for test images
data_dir = tmp_path / "images"
data_dir.mkdir()
# Copy all image files from ena24_small to our temp directory
image_files = []
for img_path in ENA24_SMALL_DIR.glob("*.jpg"):
shutil.copy(img_path, data_dir)
image_files.append(data_dir / img_path.name)
csv_dir = tmp_path / "csv"
csv_dir.mkdir()
# Create CSVs with different path formats for prediction
relative_csv = csv_dir / "relative_paths.csv"
create_csv_with_relative_paths(image_files, relative_csv)
absolute_csv = csv_dir / "absolute_paths.csv"
create_csv_with_absolute_paths(image_files, absolute_csv)
mixed_csv = csv_dir / "mixed_paths.csv"
create_csv_with_mixed_paths(image_files, mixed_csv)
original_labels_csv = ENA24_SMALL_DIR / "labels.csv"
original_labels_df = pd.read_csv(original_labels_csv)
relative_labels = csv_dir / "relative_labels.csv"
create_labels_csv(image_files, original_labels_df, relative_labels)
absolute_labels = csv_dir / "absolute_labels.csv"
absolute_labels_df = original_labels_df.copy()
absolute_labels_df["filepath"] = [
str((data_dir / f).absolute()) for f in original_labels_df["filepath"]
]
absolute_labels_df.to_csv(absolute_labels, index=False)
mixed_labels = csv_dir / "mixed_labels.csv"
mixed_labels_df = original_labels_df.copy()
for i, filepath in enumerate(mixed_labels_df["filepath"]):
if i % 2 == 0:
mixed_labels_df.loc[i, "filepath"] = filepath # Relative path
else:
mixed_labels_df.loc[i, "filepath"] = str(
(data_dir / filepath).absolute()
) # Absolute path
mixed_labels_df.to_csv(mixed_labels, index=False)
# Create output directories
save_dir = tmp_path / "output"
save_dir.mkdir()
checkpoint_dir = tmp_path / "checkpoints"
checkpoint_dir.mkdir()
cache_dir = tmp_path / "cache"
cache_dir.mkdir()
original_annotations_json = ENA24_SMALL_DIR / "coco_annotations.json"
coco_file = csv_dir / "coco_annotations.json"
shutil.copy(original_annotations_json, coco_file)
# The megadetector annotations have been validated with
# https://github.com/agentmorris/MegaDetector/blob/main/megadetector/postprocessing/validate_batch_results.py
original_md_annotations_json = ENA24_SMALL_DIR / "md_annotations.json"
md_file = csv_dir / "md_annotations.json"
shutil.copy(original_md_annotations_json, md_file)
return {
"data_dir": data_dir,
"csv_dir": csv_dir,
"save_dir": save_dir,
"checkpoint_dir": checkpoint_dir,
"cache_dir": cache_dir,
"files": image_files,
"labels_df": original_labels_df,
"relative_csv": relative_csv,
"absolute_csv": absolute_csv,
"mixed_csv": mixed_csv,
"relative_labels": relative_labels,
"absolute_labels": absolute_labels,
"mixed_labels": mixed_labels,
"coco_file": coco_file,
"md_file": md_file,
}
@pytest.mark.parametrize("input_format", ["relative_csv", "absolute_csv", "mixed_csv"])
def test_image_cli_with_paths(mocker, ena24_dataset_setup, input_format):
"""Test that the image CLI can handle a CSV with the specified path format."""
predict_mock = mocker.patch("zamba.images.manager.ZambaImagesManager.predict")
result = runner.invoke(
image_app,
[
"predict",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--filepaths",
str(ena24_dataset_setup[input_format]),
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--yes",
],
)
assert result.exit_code == 0
predict_mock.assert_called_once()
# Check that the config passed to the predict function has the correct filepaths
predict_config = predict_mock.call_args[0][0]
assert len(predict_config.filepaths) == len(ena24_dataset_setup["files"])
for file_path in predict_config.filepaths["filepath"]:
assert Path(file_path).exists()
@pytest.mark.parametrize("input_format", ["relative_labels", "absolute_labels", "mixed_labels"])
def test_train_with_different_path_formats(mocker, ena24_dataset_setup, input_format):
"""Test that the image CLI can handle training with different label path formats."""
train_mock = mocker.patch("zamba.images.manager.ZambaImagesManager.train")
result = runner.invoke(
image_app,
[
"train",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--labels",
str(ena24_dataset_setup[input_format]),
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--cache-dir",
str(ena24_dataset_setup["cache_dir"]),
"--checkpoint-path",
str(ena24_dataset_setup["checkpoint_dir"]),
"--from-scratch", # Train from scratch to avoid needing a checkpoint
"--max-epochs",
"1", # Just 1 epoch for testing
"--yes",
],
)
assert result.exit_code == 0
train_mock.assert_called_once()
train_config = train_mock.call_args[0][0]
processed_images = set(Path(p).name for p in train_config.labels["filepath"].tolist())
expected_images = {f.name for f in ena24_dataset_setup["files"]}
assert len(processed_images) > 0
assert processed_images == expected_images
def test_image_cli_file_discovery(mocker, ena24_dataset_setup):
"""Test that the image CLI can discover files in a directory when no CSV is provided."""
predict_mock = mocker.patch("zamba.images.manager.ZambaImagesManager.predict")
result = runner.invoke(
image_app,
[
"predict",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--yes",
],
)
assert result.exit_code == 0
predict_mock.assert_called_once()
# Check that all files in the directory were found
predict_config = predict_mock.call_args[0][0]
assert len(predict_config.filepaths) == len(ena24_dataset_setup["files"])
def test_train_with_coco_labels(mocker, ena24_dataset_setup):
"""Test training with COCO format JSON labels."""
train_mock = mocker.patch("zamba.images.manager.ZambaImagesManager.train")
result = runner.invoke(
image_app,
[
"train",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--labels",
str(ena24_dataset_setup["coco_file"]),
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--cache-dir",
str(ena24_dataset_setup["cache_dir"]),
"--checkpoint-path",
str(ena24_dataset_setup["checkpoint_dir"]),
"--from-scratch", # Train from scratch to avoid needing a checkpoint
"--max-epochs",
"1", # Just 1 epoch for testing
"--yes",
],
)
assert result.exit_code == 0
train_mock.assert_called_once()
train_config = train_mock.call_args[0][0]
assert len(train_config.labels) == 21
assert set(["x1", "y1", "x2", "y2"]).issubset(train_config.labels.columns)
# Test that x2 is calculated correctly from x1 and width (1 and 111)
assert train_config.labels.loc[0, "x2"] == 112
# Test that y2 is calculated correctly from x1 and height (11 and 10)
assert train_config.labels.loc[0, "y2"] == 21
def test_train_with_md_labels(mocker, ena24_dataset_setup):
"""Test training with COCO format JSON labels."""
train_mock = mocker.patch("zamba.images.manager.ZambaImagesManager.train")
result = runner.invoke(
image_app,
[
"train",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--labels",
str(ena24_dataset_setup["md_file"]),
"--labels-format",
"megadetector",
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--cache-dir",
str(ena24_dataset_setup["cache_dir"]),
"--checkpoint-path",
str(ena24_dataset_setup["checkpoint_dir"]),
"--from-scratch", # Train from scratch to avoid needing a checkpoint
"--max-epochs",
"1", # Just 1 epoch for testing
"--yes",
],
)
assert result.exit_code == 0
train_mock.assert_called_once()
config = train_mock.call_args[0][0]
assert len(config.labels) == 21
assert set(["x1", "y1", "x2", "y2"]).issubset(config.labels.columns)
# MD bounds are relative
assert config.labels.loc[0, "x1"] < 1.0
data = ImageClassificationDataModule(
data_dir=config.data_dir,
cache_dir=config.cache_dir,
annotations=config.labels,
batch_size=config.batch_size,
num_workers=config.num_workers,
detection_threshold=config.detections_threshold,
crop_images=config.crop_images,
)
assert data.annotations.loc[0, "x1"] == 1920 * 0.35 # 672
assert data.annotations.loc[0, "y1"] == 1080 * 0.35 # 378
# some buffer for floating point precision
assert (
abs(data.annotations.loc[0, "x2"] - ((1920 * 0.35) + (1920 * 0.3))) <= 1
) # 672 + 576 = 1248
assert (
abs(data.annotations.loc[0, "y2"] - ((1080 * 0.35) + (1080 * 0.3))) <= 1
) # 378 + 324 = 702
# make sure bounding boxes are absolute
assert (
(
(data.annotations.loc[:, ["x1", "y1", "x2", "y2"]] == 0)
| (data.annotations.loc[:, ["x1", "y1", "x2", "y2"]] >= 1.0)
)
.all()
.all()
)
def test_image_cli_csv_output(mocker, ena24_dataset_setup):
"""Test that the image CLI can output predictions in CSV format."""
mock_detector = mocker.MagicMock()
mock_detector.generate_detections_one_image.return_value = {
"detections": [
{
"category": "1", # Animal
"conf": 0.9,
"bbox": [0.1, 0.2, 0.3, 0.4],
}
]
}
mocker.patch("megadetector.detection.run_detector.load_detector", return_value=mock_detector)
mock_classifier = mocker.MagicMock()
mock_classifier.species = ["animal", "blank"]
# Mock classification result
mock_classification_result = torch.tensor([[0.8, 0.2]])
mock_classifier.return_value = mock_classification_result
mocker.patch("zamba.images.manager.instantiate_model", return_value=mock_classifier)
# Run the CLI command
result = runner.invoke(
image_app,
[
"predict",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--results-file-format",
"csv",
"--yes",
],
)
assert result.exit_code == 0
# Check that the CSV output file was created
output_csv = ena24_dataset_setup["save_dir"] / "zamba_predictions.csv"
assert output_csv.exists()
# Check the contents of the CSV file
predictions = pd.read_csv(output_csv)
# Check that required columns exist
assert "filepath" in predictions.columns
assert "detection_category" in predictions.columns
assert "detection_conf" in predictions.columns
assert "x1" in predictions.columns and "y1" in predictions.columns
assert "x2" in predictions.columns and "y2" in predictions.columns
# Check that our species columns exist
for species in ["animal", "blank"]:
assert species in predictions.columns
def test_image_cli_megadetector_output(mocker, ena24_dataset_setup):
"""Test that the image CLI can output predictions in MegaDetector JSON format."""
# Mock detector
mock_detector = mocker.MagicMock()
mock_detector.generate_detections_one_image.return_value = {
"detections": [
{
"category": "1", # Animal
"conf": 0.9,
"bbox": [0.1, 0.2, 0.3, 0.4],
}
]
}
mocker.patch("megadetector.detection.run_detector.load_detector", return_value=mock_detector)
mock_classifier = mocker.MagicMock()
mock_classifier.species = ["animal", "blank"]
mock_classification_result = torch.tensor([[0.8, 0.2]])
mock_classifier.return_value = mock_classification_result
mocker.patch("zamba.images.manager.instantiate_model", return_value=mock_classifier)
result = runner.invoke(
image_app,
[
"predict",
"--data-dir",
str(ena24_dataset_setup["data_dir"]),
"--save-dir",
str(ena24_dataset_setup["save_dir"]),
"--results-file-format",
"megadetector",
"--yes",
],
)
assert result.exit_code == 0
output_json = ena24_dataset_setup["save_dir"] / "zamba_predictions.json"
assert output_json.exists()
# Check the contents of the JSON file
with open(output_json, "r") as f:
predictions = json.load(f)
assert "images" in predictions
assert "detection_categories" in predictions
assert "info" in predictions
first_image = predictions["images"][0]
assert "file" in first_image
assert "detections" in first_image
detection = first_image["detections"][0]
assert "category" in detection
assert "conf" in detection
assert "bbox" in detection
classification = detection["classifications"][0]
assert len(classification) == 2
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_instantiate_model.py | tests/test_instantiate_model.py | import pandas as pd
import pytest
import torch
from zamba.models.config import SchedulerConfig, TrainConfig
from zamba.models.model_manager import instantiate_model
from zamba.models.utils import get_model_species
from conftest import DummyZambaVideoClassificationLightningModule
def test_scheduler_ignored_for_prediction(dummy_checkpoint):
"""Tests whether we can instantiate a model for prediction and ignore scheduler config."""
original_hyperparams = torch.load(dummy_checkpoint, weights_only=False)["hyper_parameters"]
assert original_hyperparams["scheduler"] is None
model = instantiate_model(
checkpoint=dummy_checkpoint,
scheduler_config=SchedulerConfig(scheduler="StepLR", scheduler_params=None),
labels=None,
)
# since labels is None, we are predicting. as a result, hparams are not updated
assert model.hparams["scheduler"] is None
# Note: using configs won't allow us to be in this situation
# # in Train Config, which contains ModelParams, labels cannot be None
def test_default_scheduler_used(time_distributed_checkpoint):
"""Tests instantiate model uses the default scheduler from the hparams on the model."""
default_scheduler_passed_model = instantiate_model(
checkpoint=time_distributed_checkpoint,
scheduler_config="default",
labels=pd.DataFrame([{"filepath": "gorilla.mp4", "species_gorilla": 1}]),
)
# with "default" scheduler_config, hparams from training are used
assert default_scheduler_passed_model.hparams["scheduler"] == "MultiStepLR"
assert default_scheduler_passed_model.hparams["scheduler_params"] == dict(
milestones=[3], gamma=0.5, verbose=True
)
def test_scheduler_used_if_passed(time_distributed_checkpoint):
"""Tests that scheduler config gets used and overrides scheduler on time distributed training."""
scheduler_passed_model = instantiate_model(
checkpoint=time_distributed_checkpoint,
scheduler_config=SchedulerConfig(scheduler="StepLR"),
labels=pd.DataFrame([{"filepath": "gorilla.mp4", "species_gorilla": 1}]),
)
# hparams reflect user specified scheduler config
assert scheduler_passed_model.hparams["scheduler"] == "StepLR"
# if no scheduler params are passed, will be None (use PTL default for that scheduler)
assert scheduler_passed_model.hparams["scheduler_params"] is None
# check scheduler params get used
scheduler_params_passed_model = instantiate_model(
checkpoint=time_distributed_checkpoint,
scheduler_config=SchedulerConfig(scheduler="StepLR", scheduler_params={"gamma": 0.3}),
labels=pd.DataFrame([{"filepath": "gorilla.mp4", "species_gorilla": 1}]),
)
assert scheduler_params_passed_model.hparams["scheduler_params"] == {"gamma": 0.3}
def test_remove_scheduler(time_distributed_checkpoint):
"""Tests that a scheduler config with None values removes the scheduler on the model."""
remove_scheduler_model = instantiate_model(
checkpoint=time_distributed_checkpoint,
scheduler_config=SchedulerConfig(scheduler=None),
labels=pd.DataFrame([{"filepath": "gorilla.mp4", "species_gorilla": 1}]),
)
# pretrained model has scheduler but this is overridden with SchedulerConfig
assert remove_scheduler_model.hparams["scheduler"] is None
def test_use_default_model_labels(dummy_trained_model_checkpoint):
"""Tests that training a model using labels that are a subset of the model species resumes
model training without replacing the model head."""
original_model = DummyZambaVideoClassificationLightningModule.from_disk(
dummy_trained_model_checkpoint
)
model = instantiate_model(
checkpoint=dummy_trained_model_checkpoint,
scheduler_config="default",
labels=pd.DataFrame([{"filepath": "gorilla.mp4", "species_gorilla": 1}]),
use_default_model_labels=True,
)
assert (model.head.weight == original_model.head.weight).all()
assert model.hparams["species"] == [
"antelope_duiker",
"elephant",
"gorilla",
]
assert model.model[-1].out_features == 3
def test_not_use_default_model_labels(dummy_trained_model_checkpoint):
"""Tests that training a model using labels that are a subset of the model species but
with use_default_model_labels=False replaces the model head."""
original_model = DummyZambaVideoClassificationLightningModule.from_disk(
dummy_trained_model_checkpoint
)
model = instantiate_model(
checkpoint=dummy_trained_model_checkpoint,
scheduler_config="default",
labels=pd.DataFrame([{"filepath": "gorilla.mp4", "species_gorilla": 1}]),
use_default_model_labels=False,
)
assert (model.head.weight != original_model.head.weight).all()
assert model.hparams["species"] == [
"gorilla",
]
assert model.model[-1].out_features == 1
@pytest.mark.parametrize(
"model_name", ["time_distributed", "slowfast", "european", "blank_nonblank"]
)
def test_head_replaced_for_new_species(labels_absolute_path, model_name, tmp_path):
"""Check that output species reflect the new head."""
# pick species that is not present in any models
labels = pd.read_csv(labels_absolute_path)
labels["label"] = "kangaroo"
config = TrainConfig(
labels=labels,
model_name=model_name,
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
model = instantiate_model(
checkpoint=config.checkpoint,
scheduler_config="default",
labels=config.labels,
use_default_model_labels=config.use_default_model_labels,
)
assert model.hparams["species"] == model.species == ["kangaroo"]
@pytest.mark.parametrize("model_name", ["time_distributed", "slowfast", "european"])
def test_resume_subset_labels(labels_absolute_path, model_name, tmp_path):
"""Check that output species reflect the default model labels."""
# pick species that is present in all models
labels = pd.read_csv(labels_absolute_path)
labels["label"] = "bird"
config = TrainConfig(
labels=labels,
model_name=model_name,
skip_load_validation=True,
save_dir=tmp_path / "my_model",
)
model = instantiate_model(
checkpoint=config.checkpoint,
scheduler_config=SchedulerConfig(scheduler="StepLR", scheduler_params=None),
labels=config.labels,
use_default_model_labels=config.use_default_model_labels,
)
assert model.hparams["scheduler"] == "StepLR"
assert model.species == get_model_species(checkpoint=None, model_name=model_name)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_load_video_frames.py | tests/test_load_video_frames.py | import os
from pathlib import Path
import pytest
import shutil
import subprocess
from typing import Any, Callable, Dict, Optional, Union
from unittest import mock
import numpy as np
from PIL import Image
from pydantic import BaseModel, ValidationError
from zamba.data.video import (
load_video_frames,
MegadetectorLiteYoloXConfig,
VideoLoaderConfig,
)
from zamba.pytorch.dataloaders import FfmpegZambaVideoDataset
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
class Case(BaseModel):
"""A single test case for the load_video_frames function. Includes the fields:
Attributes:
name (str): A descriptive name of the test case
parameters (dict): Arguments to load_video_frames
expected_output (dict, optional): Describes the expected shape of the video that results
from calling load_video_frames with the given parameters. Keys must be from
('frames', 'height', 'width', 'channels') and values can be integers or a callable that
computes an integer from the original video metadata and output video shape.
validation_error (bool, optional): Whether a ValidationError is expected from calling
load_video_frames with the given parameters.
"""
name: str
parameters: Dict[str, Any] = {}
expected_output: Optional[Dict[str, Union[int, Callable]]] = None
validation_error: Optional[bool] = False
def assert_crop_bottom_pixels(original_video_metadata, video_shape, **kwargs):
return original_video_metadata["height"] == (
video_shape["height"] - kwargs["crop_bottom_pixels"]
)
def assert_megadetector_le_total(original_video_metadata, video_shape, **kwargs):
"""Since we don't know how many frames the megadetector will find over the threshold,
test that the number of frames returned by the megadetector is less than or equal
to the total_frames argument.
"""
return video_shape["frames"] <= kwargs["total_frames"]
def assert_megadetector_total_or_none(original_video_metadata, video_shape, **kwargs):
"""Some megadetector fill modes should resample among qualifying frames up to
total_frames. In these cases, the number of frames returned should be either
total_frames (if at least 1 qualifies) or 0 (if none qualify).
If the threshold given to the megadetector is 2, then by default the
min_over_threshold will be calculated to 0.
"""
return video_shape["frames"] in [0, kwargs["total_frames"]]
def assert_no_frames_or_correct_shape(original_video_metadata, video_shape, **kwargs):
return (video_shape["frames"] == 0) or (
(video_shape["height"] == kwargs["frame_selection_height"])
and (video_shape["width"] == kwargs["frame_selection_width"])
)
test_cases = [
Case(
name="crop_bottom_pixels",
parameters={"crop_bottom_pixels": 2},
expected_output={"height": assert_crop_bottom_pixels},
),
Case(
name="crop_bottom_pixels_and_scene_threshold",
parameters={
"crop_bottom_pixels": 2,
"scene_threshold": 0.05,
},
expected_output={"height": assert_crop_bottom_pixels},
),
Case(
name="crop_bottom_pixels_and_i_frames",
parameters={
"crop_bottom_pixels": 2,
"i_frames": True,
},
expected_output={"height": assert_crop_bottom_pixels},
),
Case(
name="crop_bottom_pixels_and_video_height_width",
parameters={
"crop_bottom_pixels": 2,
"frame_selection_height": 10,
"frame_selection_width": 10,
},
expected_output={"height": 10, "width": 10},
),
Case(
name="crop_bottom_pixels_and_total_frames",
parameters={
"crop_bottom_pixels": 2,
"total_frames": 10,
},
expected_output={"height": assert_crop_bottom_pixels, "frames": 10},
),
Case(
name="crop_bottom_pixels_and_fps",
parameters={
"crop_bottom_pixels": 2,
"fps": 1,
},
expected_output={"height": assert_crop_bottom_pixels},
),
Case(
name="i_frames",
parameters={"i_frames": True},
),
Case(
name="i_frames_and_scene_threshold",
parameters={
"i_frames": True,
"scene_threshold": 0.05,
},
validation_error=True,
),
Case(
name="i_frames_and_video_height_width",
parameters={
"i_frames": True,
"frame_selection_height": 10,
"frame_selection_width": 10,
},
expected_output={"height": 10, "width": 10},
),
Case(
name="i_frames_and_total_frames",
parameters={
"i_frames": True,
"total_frames": 10,
},
expected_output={"frames": 10},
),
Case(
name="i_frames_and_fps",
parameters={
"i_frames": True,
"fps": 1,
},
validation_error=True,
),
Case(
name="scene_threshold",
parameters={"scene_threshold": 0.05},
),
Case(
name="scene_threshold_and_video_height_width",
parameters={
"scene_threshold": 0.05,
"frame_selection_height": 10,
"frame_selection_width": 10,
},
expected_output={"height": 10, "width": 10},
),
Case(
name="scene_threshold_and_fps",
parameters={
"scene_threshold": 0.05,
"fps": 10,
},
validation_error=True,
),
Case(
name="video_height_width",
parameters={"frame_selection_height": 10, "frame_selection_width": 10},
expected_output={"height": 10, "width": 10},
),
Case(
name="video_height_width_and_total_frames",
parameters={"frame_selection_height": 10, "frame_selection_width": 10, "total_frames": 10},
expected_output={"height": 10, "width": 10, "frames": 10},
),
Case(
name="video_height_width_and_fps",
parameters={"frame_selection_height": 10, "frame_selection_width": 10, "fps": 10},
expected_output={"height": 10, "width": 10},
),
Case(
name="total_frames",
parameters={"total_frames": 10, "ensure_total_frames": True},
expected_output={"frames": 10},
),
Case(
name="fps",
parameters={"fps": 1},
),
Case(
name="early_bias",
parameters={"early_bias": True},
expected_output={"frames": 16},
),
Case(
name="early_bias_and_total_frames",
parameters={"early_bias": True, "total_frames": 10},
validation_error=True,
),
Case(
name="early_bias_and_scene_threshold",
parameters={"early_bias": True, "scene_threshold": 0.2},
validation_error=True,
),
Case(
name="early_bias_and_i_frames",
parameters={"early_bias": True, "i_frames": True},
validation_error=True,
),
Case(
name="early_bias_and_fps",
parameters={"early_bias": True, "fps": 20},
validation_error=True,
),
Case(
name="single_frame_index",
parameters={"frame_indices": [10]},
expected_output={"frames": 1},
),
Case(
name="multiple_frame_indices",
parameters={"frame_indices": [0, 2, 10]},
expected_output={"frames": 3},
),
Case(
name="frame_indices_and_total_frames",
parameters={"frame_indices": [10], "total_frames": 10},
validation_error=True,
),
Case(
name="frame_indices_and_scene_threshold",
parameters={"frame_indices": [10], "scene_threshold": 0.2},
validation_error=True,
),
Case(
name="frame_indices_and_i_frames",
parameters={"frame_indices": [10], "i_frames": True},
validation_error=True,
),
Case(
name="frame_indices_and_early_bias",
parameters={"frame_indices": [10], "early_bias": True},
validation_error=True,
),
Case(
name="evenly_sample",
parameters={"total_frames": 10, "evenly_sample_total_frames": True},
expected_output={"frames": 10},
),
Case(
name="evenly_sample_and_not_total_frames",
parameters={"evenly_sample_total_frames": True, "total_frames": None},
validation_error=True,
),
Case(
name="evenly_sample_and_scene_threshold",
parameters={
"total_frames": 10,
"evenly_sample_total_frames": True,
"scene_threshold": 0.2,
},
validation_error=True,
),
Case(
name="evenly_sample_and_i_frames",
parameters={"total_frames": 10, "evenly_sample_total_frames": True, "i_frames": True},
validation_error=True,
),
Case(
name="evenly_sample_and_fps",
parameters={"total_frames": 10, "evenly_sample_total_frames": True, "fps": 20},
validation_error=True,
),
Case(
name="evenly_sample_and_early_bias",
parameters={"total_frames": 10, "evenly_sample_total_frames": True, "early_bias": True},
validation_error=True,
),
Case(
name="megadetector_and_early_bias",
parameters={"megadetector_lite_config": {"confidence": 0.25}, "early_bias": True},
validation_error=True,
),
Case(
name="megadetector_and_evenly_sample",
parameters={
"megadetector_lite_config": {"confidence": 0.25},
"total_frames": 10,
"evenly_sample_total_frames": True,
},
validation_error=True,
),
Case(
name="megadetector_and_two_total_frames",
parameters={
"megadetector_lite_config": {"confidence": 0.01},
"total_frames": 2,
"ensure_total_frames": False,
"fps": 2,
},
expected_output={"frames": assert_megadetector_total_or_none},
),
Case(
name="megadetector_and_video_height_width",
parameters={
"megadetector_lite_config": {"confidence": 0.01},
"frame_selection_height": 50,
"frame_selection_width": 50,
"total_frames": 10,
"crop_bottom_pixels": 2,
"fps": 2,
"ensure_total_frames": False,
},
expected_output={
"height": assert_crop_bottom_pixels,
"width": 50,
"frames": assert_megadetector_le_total,
},
),
]
def get_video_metadata():
test_video_paths = sorted([path for path in TEST_VIDEOS_DIR.rglob("*") if path.is_file()])
video_metadata = []
for video_path in test_video_paths:
frames, height, width, channels = load_video_frames(video_path).shape
video_metadata.append(
{
"path": video_path,
"frames": frames,
"height": height,
"width": width,
"channels": channels,
}
)
return video_metadata
video_metadata_values = get_video_metadata()
@pytest.fixture(
params=video_metadata_values[:1],
ids=[metadata["path"].stem for metadata in video_metadata_values[:1]],
)
def video_metadata(request):
return request.param
@pytest.fixture(params=test_cases, ids=[case.name for case in test_cases])
def case(request):
return request.param
def test_load_video_frames(case: Case, video_metadata: Dict[str, Any]):
"""Tests all pairs of test cases and test videos."""
if case.validation_error:
with pytest.raises(ValidationError):
load_video_frames(video_metadata["path"], **case.parameters)
else:
video_shape = load_video_frames(video_metadata["path"], **case.parameters).shape
video_shape = dict(zip(("frames", "height", "width", "channels"), video_shape))
if case.expected_output is not None:
for field, value in case.expected_output.items():
if callable(value):
value(video_metadata, video_shape, **case.parameters)
else:
assert video_shape[field] == value
def test_same_filename_new_kwargs(tmp_path, train_metadata):
"""Test that load_video_frames does not load the npz file if the params change."""
cache = tmp_path / "test_cache"
# prep labels for one video
labels = (
train_metadata[train_metadata.split == "train"]
.set_index("filepath")
.filter(regex="species")
.head(1)
)
def _generate_dataset(config):
"""Return loaded video from FFmpegZambaVideoDataset."""
return FfmpegZambaVideoDataset(annotations=labels, video_loader_config=config).__getitem__(
index=0
)[0]
with mock.patch.dict(os.environ, {"VIDEO_CACHE_DIR": str(cache)}):
# confirm cache is set in environment variable
assert os.environ["VIDEO_CACHE_DIR"] == str(cache)
first_load = _generate_dataset(config=VideoLoaderConfig(fps=1))
new_params_same_name = _generate_dataset(config=VideoLoaderConfig(fps=2))
assert first_load.shape != new_params_same_name.shape
# check no params
no_params_same_name = _generate_dataset(config=None)
assert first_load.shape != new_params_same_name.shape != no_params_same_name.shape
# multiple params in config
first_load = _generate_dataset(config=VideoLoaderConfig(scene_threshold=0.2))
new_params_same_name = _generate_dataset(
config=VideoLoaderConfig(scene_threshold=0.2, crop_bottom_pixels=2)
)
assert first_load.shape != new_params_same_name.shape
def test_megadetector_lite_yolox_dog(tmp_path):
dog = Image.open(ASSETS_DIR / "dog.jpg")
blank = Image.new("RGB", dog.size, (64, 64, 64))
total_frames = 10
object_frame_indices = [0, 3, 4, 6]
frame_directory = tmp_path / "dog"
frame_directory.mkdir()
for frame_index in range(total_frames):
frame = dog if frame_index in object_frame_indices else blank
frame.save(frame_directory / f"frame{frame_index:02}.jpg")
subprocess.call(
[
"ffmpeg",
"-r",
"30",
"-f",
"image2",
"-s",
f"{dog.size[0]}x{dog.size[1]}",
"-i",
str(frame_directory / "frame%02d.jpg"),
"-vcodec",
"libx264",
"-crf",
"23",
"-pix_fmt",
"yuv420p",
str(tmp_path / "dog.mp4"),
"-v",
"quiet",
"-hide_banner",
"-y",
]
)
frames = load_video_frames(
tmp_path / "dog.mp4", megadetector_lite_config=MegadetectorLiteYoloXConfig()
)
# Check that we detected the correct number of frames
assert len(frames) == len(object_frame_indices)
# Check that no blank frames were selected
for frame in frames:
assert not (frame == np.ones(frame.shape, dtype=np.uint8) * 64).all()
def test_resize_after_frame_selection():
test_vid = TEST_VIDEOS_DIR / "data" / "raw" / "benjamin" / "04250002.MP4"
resize_before_vlc = VideoLoaderConfig(
frame_selection_height=10,
frame_selection_width=12,
ensure_total_frames=True,
megadetector_lite_config={
"confidence": 0.25,
"fill_mode": "score_sorted",
"n_frames": 16,
},
)
a = load_video_frames(filepath=test_vid, config=resize_before_vlc)
# use full size image for MDLite
resize_after_vlc = VideoLoaderConfig(
model_input_height=10,
model_input_width=12,
ensure_total_frames=True,
megadetector_lite_config={
"confidence": 0.25,
"fill_mode": "score_sorted",
"n_frames": 16,
},
)
b = load_video_frames(filepath=test_vid, config=resize_after_vlc)
# shapes should be the same
assert a.shape == b.shape
# but we expect some frame differences
assert (a != b).any()
def test_validate_total_frames():
config = VideoLoaderConfig(
megadetector_lite_config=MegadetectorLiteYoloXConfig(confidence=0.01, n_frames=None),
total_frames=10,
)
assert config.megadetector_lite_config.n_frames == 10
config = VideoLoaderConfig(
megadetector_lite_config=MegadetectorLiteYoloXConfig(confidence=0.01, n_frames=8),
)
assert config.total_frames == 8
def test_caching(tmp_path, caplog, train_metadata):
cache = tmp_path / "video_cache"
# prep labels for one video
labels = (
train_metadata[train_metadata.split == "train"]
.set_index("filepath")
.filter(regex="species")
.head(1)
)
# no caching by default
_ = FfmpegZambaVideoDataset(
annotations=labels,
).__getitem__(index=0)
assert not cache.exists()
# caching can be specifed in config
_ = FfmpegZambaVideoDataset(
annotations=labels, video_loader_config=VideoLoaderConfig(fps=1, cache_dir=cache)
).__getitem__(index=0)
# one file in cache
assert len([f for f in cache.rglob("*") if f.is_file()]) == 1
shutil.rmtree(cache)
# or caching can be specified in environment variable
with mock.patch.dict(os.environ, {"VIDEO_CACHE_DIR": str(cache)}):
_ = FfmpegZambaVideoDataset(
annotations=labels,
).__getitem__(index=0)
assert len([f for f in cache.rglob("*") if f.is_file()]) == 1
# changing cleanup in config does not prompt new hashing of videos
with mock.patch.dict(os.environ, {"LOG_LEVEL": "DEBUG"}):
_ = FfmpegZambaVideoDataset(
annotations=labels, video_loader_config=VideoLoaderConfig(cleanup_cache=True)
).__getitem__(index=0)
assert "Loading from cache" in caplog.text
# if no config is passed, this is equivalent to specifying None/False in all non-cache related VLC params
no_config = FfmpegZambaVideoDataset(annotations=labels, video_loader_config=None).__getitem__(
index=0
)[0]
config_with_nones = FfmpegZambaVideoDataset(
annotations=labels,
video_loader_config=VideoLoaderConfig(
crop_bottom_pixels=None,
i_frames=False,
scene_threshold=None,
megadetector_lite_config=None,
frame_selection_height=None,
frame_selection_width=None,
total_frames=None,
ensure_total_frames=False,
fps=None,
early_bias=False,
frame_indices=None,
evenly_sample_total_frames=False,
pix_fmt="rgb24",
model_input_height=None,
model_input_width=None,
),
).__getitem__(index=0)[0]
assert np.array_equal(no_config, config_with_nones)
def test_validate_video_cache_dir():
with mock.patch.dict(os.environ, {"VIDEO_CACHE_DIR": "example_cache_dir"}):
config = VideoLoaderConfig()
assert config.cache_dir == Path("example_cache_dir")
for cache in ["", 0]:
with mock.patch.dict(os.environ, {"VIDEO_CACHE_DIR": str(cache)}):
config = VideoLoaderConfig()
assert config.cache_dir is None
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_cli.py | tests/test_cli.py | import os
from pathlib import Path
import shutil
from typer.testing import CliRunner
import pandas as pd
import pytest
from pytest_mock import mocker # noqa: F401
from zamba.cli import app
from conftest import ASSETS_DIR, TEST_VIDEOS_DIR
runner = CliRunner()
@pytest.fixture
def minimum_valid_train(labels_absolute_path):
return ["train", "--labels", str(labels_absolute_path), "--skip-load-validation"]
@pytest.fixture
def minimum_valid_predict():
return ["predict", "--data-dir", str(TEST_VIDEOS_DIR), "--skip-load-validation"]
# mock training to just test CLI args
def train_mock(self):
return None
# mock predictions to just test CLI args
def pred_mock(self):
return None
def test_train_specific_options(mocker, minimum_valid_train, tmp_path): # noqa: F811
mocker.patch("zamba.cli.ModelManager.train", train_mock)
# check labels must exist
result = runner.invoke(app, ["train", "--labels", "my_labels.csv"])
assert result.exit_code == 2
assert "'my_labels.csv' does not exist." in result.output
# check data dir must exist
result = runner.invoke(app, ["train", "--data-dir", "my_data"])
assert result.exit_code == 2
assert "Path 'my_data' does not exist." in result.output
# test from config
result = runner.invoke(
app, ["train", "--config", str(ASSETS_DIR / "sample_train_config.yaml")]
)
assert result.exit_code == 0
assert f"Config file: {str(ASSETS_DIR / 'sample_train_config.yaml')}" in result.output
result = runner.invoke(app, minimum_valid_train + ["--save-dir", str(tmp_path)])
assert result.exit_code == 0
def test_shared_cli_options(mocker, minimum_valid_train, minimum_valid_predict):  # noqa: F811
    """Test CLI options that are shared between train and predict commands."""
    mocker.patch("zamba.cli.ModelManager.train", train_mock)
    mocker.patch("zamba.cli.ModelManager.predict", pred_mock)
    for base_args in (minimum_valid_train, minimum_valid_predict):
        # the minimal invocation succeeds without any config file
        res = runner.invoke(app, base_args)
        assert res.exit_code == 0
        assert "Config file: None" in res.output
        # every published model name is accepted
        for model_name in ("time_distributed", "slowfast", "european", "blank_nonblank"):
            assert runner.invoke(app, base_args + ["--model", model_name]).exit_code == 0
        # an unknown model name is rejected by option validation
        res = runner.invoke(app, base_args + ["--model", "my_model"])
        assert res.exit_code == 2
        assert "Invalid value" in res.output
        # batch size, gpus, num_workers, and dry run options all parse cleanly
        extra_args = [
            "--batch-size",
            "10",
            "--gpus",
            "0",
            "--num-workers",
            "1",
            "--dry-run",
            "-y",
        ]
        assert runner.invoke(app, base_args + extra_args).exit_code == 0
        # an unknown flag is rejected
        res = runner.invoke(app, base_args + ["--bad-arg"])
        assert res.exit_code == 2
        assert "no such option" in str(res.output).lower()
        # requesting more gpus than available raises a validation error
        res = runner.invoke(app, base_args + ["--gpus", "2"])
        assert res.exit_code == 1
        assert "Cannot use 2" in str(res.exc_info)
def test_predict_specific_options(mocker, minimum_valid_predict, tmp_path):  # noqa: F811
    """Exercise CLI options that only the `predict` command accepts."""
    mocker.patch("zamba.cli.ModelManager.predict", pred_mock)
    # nonexistent data dir is rejected
    res = runner.invoke(app, ["predict", "--data-dir", "my_data"])
    assert res.exit_code == 2
    assert "Path 'my_data' does not exist." in res.output
    # nonexistent checkpoint is rejected
    res = runner.invoke(app, minimum_valid_predict + ["--checkpoint", "my_checkpoint.ckpt"])
    assert res.exit_code == 2
    assert "Path 'my_checkpoint.ckpt' does not exist." in res.output
    # a probability threshold outside (0, 1) fails validation
    res = runner.invoke(app, minimum_valid_predict + ["--proba-threshold", 5])
    assert res.exit_code == 1
    assert (
        "Setting proba_threshold outside of the range (0, 1) will cause all probabilities to be rounded to the same value"
        in str(res.exc_info)
    )
    # valid output-format combinations succeed
    res = runner.invoke(app, minimum_valid_predict + ["--proba-threshold", "0.5", "--no-save"])
    assert res.exit_code == 0
    res = runner.invoke(app, minimum_valid_predict + ["--output-class-names", "--save"])
    assert res.exit_code == 0
    # an existing predictions file can be overwritten with -o
    (tmp_path / "zamba_predictions.csv").touch()
    res = runner.invoke(
        app,
        minimum_valid_predict + ["--output-class-names", "--save-dir", str(tmp_path), "-o"],
    )
    assert res.exit_code == 0
@pytest.mark.parametrize("model", ["time_distributed", "blank_nonblank"])
def test_actual_prediction_on_single_video(tmp_path, model):  # noqa: F811
    """Integration test: run a real prediction on one known-blank video.

    Nothing is mocked, so this downloads/loads the actual model weights.
    """
    data_dir = tmp_path / "videos"
    data_dir.mkdir()
    # one video whose expected top class is "blank"
    shutil.copy(TEST_VIDEOS_DIR / "data" / "raw" / "benjamin" / "04250002.MP4", data_dir)
    save_dir = tmp_path / "zamba"
    result = runner.invoke(
        app,
        [
            "predict",
            "--data-dir",
            str(data_dir),
            "--config",
            str(ASSETS_DIR / "sample_predict_config.yaml"),
            "--yes",
            "--save-dir",
            str(save_dir),
            "--model",
            model,
        ],
    )
    assert result.exit_code == 0
    # check preds file got saved out
    assert save_dir.exists()
    # check config got saved out too
    assert (save_dir / "predict_configuration.yaml").exists()
    # the highest-probability class for the single video should be "blank"
    assert (
        pd.read_csv(save_dir / "zamba_predictions.csv", index_col="filepath")
        .idxmax(axis=1)
        .values[0]
        == "blank"
    )
def test_actual_prediction_on_images(tmp_path):
    """Integration test: predict on the bundled test images.

    Runs the real image model (nothing is mocked), then checks that every
    image named after a known class is predicted as that class. The previously
    requested `mocker` fixture was unused and has been dropped.
    """
    shutil.copytree(ASSETS_DIR / "images", tmp_path / "images")
    data_dir = tmp_path / "images"
    save_dir = tmp_path / "zamba"
    result = runner.invoke(
        app,
        [
            "image",
            "predict",
            "--data-dir",
            str(data_dir),
            "--yes",
            "--save-dir",
            str(save_dir),
        ],
    )
    assert result.exit_code == 0
    # predictions csv is written to the save dir
    assert save_dir.exists()
    df = pd.read_csv(save_dir / "zamba_predictions.csv", index_col="filepath")
    for img, label in df.idxmax(axis=1).items():
        # skip any assets that are not specifically designed for this test
        if Path(img).stem in df.columns:
            assert Path(img).stem == label
def test_depth_cli_options(mocker, tmp_path):  # noqa: F811
    """Smoke-test the depth estimation CLI command with a mocked model run."""
    mocker.patch("zamba.models.depth_estimation.config.DepthEstimationConfig.run_model", pred_mock)
    # help text is wired up
    res = runner.invoke(app, ["depth", "--help"])
    assert res.exit_code == 0
    assert "Estimate animal distance" in res.output
    # a full invocation parses and echoes its configuration
    res = runner.invoke(
        app,
        [
            "depth",
            "--data-dir",
            str(TEST_VIDEOS_DIR),
            "--save-to",
            str(tmp_path),
            "--batch-size",
            12,
            "--weight-download-region",
            "asia",
            "--yes",
        ],
    )
    assert res.exit_code == 0
    assert "The following configuration will be used" in res.output
@pytest.mark.skipif(
    not bool(int(os.environ.get("ZAMBA_RUN_DENSEPOSE_TESTS", 0))),
    reason="""Skip the densepose specific tests unless environment variable \
ZAMBA_RUN_DENSEPOSE_TESTS is set to 1.""",
)
def test_densepose_cli_options(mocker):  # noqa: F811
    """Smoke-test the densepose CLI command (help text and a mocked run)."""
    # run_model is mocked, so only CLI parsing/configuration is exercised
    mocker.patch("zamba.models.densepose.config.DensePoseConfig.run_model", pred_mock)
    result = runner.invoke(
        app,
        [
            "densepose",
            "--help",
        ],
    )
    assert result.exit_code == 0
    assert "Run densepose algorithm on videos." in result.output
    result = runner.invoke(
        app,
        ["densepose", "--data-dir", str(ASSETS_DIR / "densepose_tests"), "--yes"],
    )
    assert result.exit_code == 0
    assert "The following configuration will be used" in result.output
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/conftest.py | tests/conftest.py | import logging
import os
import random
import string
from typing import Optional, Union
from loguru import logger
import pandas as pd
from pathlib import Path
import pytest
from _pytest.logging import caplog as _caplog # noqa: F401
import torch
from zamba.data.video import VideoLoaderConfig
from zamba.models.config import PredictConfig, TrainConfig
from zamba.models.model_manager import MODEL_MAPPING, train_model
from zamba.models.registry import register_model
from zamba.object_detection.yolox.megadetector_lite_yolox import MegadetectorLiteYoloX
from zamba.pytorch.transforms import zamba_image_model_transforms
from zamba.pytorch_lightning.video_modules import (
ZambaClassificationLightningModule,
ZambaVideoClassificationLightningModule,
)
ASSETS_DIR = Path(__file__).parent / "assets"
TEST_VIDEOS_DIR = ASSETS_DIR / "videos"
random.seed(56745)
@register_model
class DummyZambaVideoClassificationLightningModule(ZambaVideoClassificationLightningModule):
    """A dummy video model: a frozen ones-initialized linear backbone feeding a
    linear head whose weights start out as all zeros, so tests can verify that
    training only updates the head.
    """

    _default_model_name = "dummy_model"  # used to look up default configuration for checkpoints

    def __init__(
        self,
        num_frames: int,
        num_hidden: int,
        finetune_from: Optional[Union[os.PathLike, str]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if finetune_from is None:
            backbone = torch.nn.Linear(num_frames, num_hidden)
            torch.nn.init.ones_(backbone.weight)
        else:
            # reuse the backbone from a previously saved checkpoint
            backbone = self.from_disk(finetune_from).backbone
        # freeze the backbone so only the head can learn
        for param in backbone.parameters():
            param.requires_grad = False
        head = torch.nn.Linear(num_hidden, self.num_classes)
        torch.nn.init.zeros_(head.weight)
        self.backbone = backbone
        self.head = head
        # pool the video down to a vector, then backbone -> head
        self.model = torch.nn.Sequential(
            torch.nn.AdaptiveAvgPool3d(1), torch.nn.Flatten(), backbone, head
        )
        self.save_hyperparameters("num_frames", "num_hidden")

    def forward(self, x, *args, **kwargs):
        return self.model(x)


# use the standard image-model transforms when loading data for the dummy model
MODEL_MAPPING["DummyZambaVideoClassificationLightningModule"] = {
    "transform": zamba_image_model_transforms()
}
@register_model
class DummyZambaImageClassificationLightningModule(ZambaClassificationLightningModule):
    """A dummy image model: a frozen ones-initialized linear backbone feeding a
    linear head whose weights start out as all zeros.
    """

    _default_model_name = "dummy_model"  # used to look up default configuration for checkpoints

    def __init__(
        self,
        in_features: int = 224 * 224 * 3,
        num_hidden: int = 1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        backbone = torch.nn.Linear(in_features, num_hidden)
        torch.nn.init.ones_(backbone.weight)
        self.loss_fn = torch.nn.CrossEntropyLoss()
        # freeze the backbone so only the head can learn
        for param in backbone.parameters():
            param.requires_grad = False
        head = torch.nn.Linear(num_hidden, self.num_classes)
        torch.nn.init.zeros_(head.weight)
        self.backbone = backbone
        self.head = head
        self.model = torch.nn.Sequential(torch.nn.Flatten(), backbone, head)
        self.save_hyperparameters("num_hidden")

    def forward(self, x, *args, **kwargs):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        # integer labels -> one-hot floats for CrossEntropyLoss
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes).to(torch.float)
        y_hat = self.model(x)
        loss = self.loss_fn(y_hat, y)
        return loss

    def _val_step(self, batch, batch_idx, subset):
        # shared body for validation_step / test_step; `subset` names the metric
        x, y = batch
        y = torch.nn.functional.one_hot(y, num_classes=self.num_classes).to(torch.float)
        y_hat = self(x)
        loss = self.loss_fn(y_hat, y)
        self.log(f"{subset}_loss", loss.detach(), sync_dist=True, reduce_fx="mean")
        # return (targets, predictions) as numpy for epoch-end aggregation
        return (
            y.cpu().numpy().astype(int),
            y_hat.cpu().numpy(),
        )

    def validation_step(self, batch, batch_idx):
        output = self._val_step(batch, batch_idx, "val")
        self.validation_step_outputs.append(output)
        return output

    def test_step(self, batch, batch_idx):
        output = self._val_step(batch, batch_idx, "test")
        self.test_step_outputs.append(output)
        return output


# use the standard image-model transforms when loading data for the dummy model
MODEL_MAPPING["DummyZambaImageClassificationLightningModule"] = {
    "transform": zamba_image_model_transforms()
}
class DummyTrainConfig(TrainConfig):
    """TrainConfig with lightweight defaults so dummy-model tests run in seconds."""

    # let model name be "dummy" without causing errors
    model_name: str
    batch_size = 1
    max_epochs = 1
    model_name = "dummy"
    skip_load_validation = True
    auto_lr_find = False
@pytest.fixture(scope="session")
def labels_relative_path() -> os.PathLike:
    # labels csv whose filepaths are relative (not absolute) paths
    return ASSETS_DIR / "labels.csv"


@pytest.fixture(scope="session")
def labels_json_relative_path() -> os.PathLike:
    # NOTE(review): despite `json` in the name this returns labels.csv, identical
    # to labels_relative_path — confirm whether labels.json was intended here.
    return ASSETS_DIR / "labels.csv"
def labels_n_classes_df(n_classes):
    """Build a labels dataframe whose ``label`` column is randomly drawn from
    the first ``n_classes`` uppercase ascii letters.

    Args:
        n_classes (int): number of distinct classes to draw from; at most 26.

    Raises:
        ValueError: if ``n_classes`` exceeds the number of uppercase letters.
    """
    max_classes = len(string.ascii_uppercase)
    if n_classes > max_classes:
        # message previously said "less than 26" although 26 itself is valid
        raise ValueError(f"n_classes must be at most {max_classes}")
    choices = string.ascii_uppercase[:n_classes]
    df = pd.read_csv(ASSETS_DIR / "labels.csv")
    df.label = random.choices(choices, k=len(df))
    return df
@pytest.fixture(scope="session")
def labels_absolute_path(tmp_path_factory) -> os.PathLike:
    """Copy of labels.csv with `filepath` rewritten as absolute video paths."""
    tmp_path = tmp_path_factory.mktemp("dummy-model-dir")
    output_path = tmp_path / "labels.csv"
    df = pd.read_csv(ASSETS_DIR / "labels.csv")
    # `.path` is not a plain pandas attribute — presumably the pandas_path
    # accessor registered by a dependency; confirm before touching this line.
    df["filepath"] = (str(TEST_VIDEOS_DIR) / df.filepath.path).path.resolve()
    df.to_csv(output_path, index=False)
    return output_path
@pytest.fixture(scope="session")
def labels_no_splits(labels_absolute_path, tmp_path_factory) -> os.PathLike:
    """Labels csv stripped down to filepath/label (no split column)."""
    out_dir = tmp_path_factory.mktemp("dummy-model-dir")
    destination = out_dir / "labels_no_splits.csv"
    df = pd.read_csv(labels_absolute_path, usecols=["filepath", "label"])
    df.to_csv(destination, index=False)
    return destination


@pytest.fixture(scope="session")
def filepaths(labels_absolute_path, tmp_path_factory) -> os.PathLike:
    """Csv containing only the filepath column of the labels file."""
    out_dir = tmp_path_factory.mktemp("dummy-model-dir")
    destination = out_dir / "filepaths.csv"
    df = pd.read_csv(labels_absolute_path, usecols=["filepath"])
    df.to_csv(destination, index=False)
    return destination
@pytest.fixture(scope="session")
def train_metadata(labels_absolute_path) -> pd.DataFrame:
    # labels dataframe as processed by TrainConfig validation
    return TrainConfig(labels=labels_absolute_path).labels


@pytest.fixture(scope="session")
def predict_metadata(filepaths) -> pd.DataFrame:
    # filepaths dataframe as processed by PredictConfig validation
    return PredictConfig(filepaths=filepaths).filepaths


@pytest.fixture(scope="session")
def time_distributed_checkpoint(labels_absolute_path) -> os.PathLike:
    # checkpoint path that TrainConfig resolves for the time_distributed model
    return TrainConfig(labels=labels_absolute_path, model_name="time_distributed").checkpoint


@pytest.fixture(scope="session")
def mdlite():
    # session-scoped detector; note some tests overwrite mdlite.config in place
    return MegadetectorLiteYoloX()
@pytest.fixture(scope="session")
def dummy_checkpoint(labels_absolute_path, tmp_path_factory) -> os.PathLike:
    # save an untrained dummy video model covering all species in the labels csv
    tmp_path = tmp_path_factory.mktemp("dummy-model-dir")
    labels = pd.read_csv(labels_absolute_path)
    species = list(labels.label.unique())
    output_path = tmp_path / "dummy.ckpt"
    DummyZambaVideoClassificationLightningModule(
        num_frames=4, num_hidden=1, species=species
    ).to_disk(output_path)
    return output_path


@pytest.fixture(scope="session")
def dummy_train_config(labels_absolute_path, dummy_checkpoint, tmp_path_factory):
    # minimal one-epoch training configuration built on the dummy checkpoint
    tmp_path = tmp_path_factory.mktemp("dummy-model-dir")
    return DummyTrainConfig(
        labels=labels_absolute_path,
        data_dir=TEST_VIDEOS_DIR,
        model_name="dummy",
        checkpoint=dummy_checkpoint,
        max_epochs=1,
        batch_size=1,
        auto_lr_find=False,
        num_workers=2,
        save_dir=tmp_path / "my_model",
        skip_load_validation=True,
    )
@pytest.fixture(scope="session")
def dummy_video_loader_config():
    """Minimal video loader config matching the dummy model's 4-frame input."""
    return VideoLoaderConfig(total_frames=4, frame_selection_height=19, frame_selection_width=19)


@pytest.fixture(scope="session")
def dummy_trainer(dummy_train_config, dummy_video_loader_config):
    """Train the dummy model once per session and return the fitted trainer."""
    return train_model(
        train_config=dummy_train_config, video_loader_config=dummy_video_loader_config
    )


@pytest.fixture(scope="session")
def dummy_trained_model_checkpoint(dummy_trainer):
    """Path to the first checkpoint the trainer saved in its log directory."""
    # Path.glob already returns an iterator, so next() takes the first match
    # directly (the previous iter() wrapper and extra parens were redundant)
    return next(Path(dummy_trainer.logger.log_dir).glob("*.ckpt"))
@pytest.fixture
def caplog(_caplog):  # noqa: F811
    """Bridge loguru records into pytest's caplog fixture, per:
    https://loguru.readthedocs.io/en/stable/resources/migration.html#making-things-work-with-pytest-and-caplog
    """

    class PropagateHandler(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    sink_id = logger.add(PropagateHandler(), format="{message} {extra}")
    yield _caplog
    logger.remove(sink_id)
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_transforms.py | tests/test_transforms.py | import torch
from zamba.pytorch.transforms import PadDimensions
def test_pad_dimensions():
    """PadDimensions zero-pads tensors up to the requested per-dimension sizes."""
    # None dimensions are left at their original size
    pad = PadDimensions((None, 2))
    x = torch.randn(3, 1)
    padded_x = pad(x)
    assert padded_x.shape == torch.Size([3, 2])
    assert (pad(x)[:, 1:] == x).all()
    # pad a few more dimensions at once
    pad = PadDimensions((None, 5, None, 7))
    x = torch.randn(2, 3, 4, 5)
    padded_x = pad(x)
    assert padded_x.shape == torch.Size([2, 5, 4, 7])
    assert (padded_x[:, 1:-1, :, 1:-1] == x).all()
    # do not change sizes if a dimension is already larger than requested
    pad = PadDimensions((None, 5, None, 4))
    x = torch.randn(2, 3, 4, 5)
    padded_x = pad(x)
    assert padded_x.shape == torch.Size([2, 5, 4, 5])
    assert (padded_x[:, 1:-1, :, :] == x).all()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_filter_frames.py | tests/test_filter_frames.py | import numpy as np
import pytest
from zamba.object_detection.yolox.megadetector_lite_yolox import MegadetectorLiteYoloXConfig
n_frames = 100
rng = np.random.RandomState(68891)
@pytest.fixture
def frames():
    # `n_frames` (100) 6x8 RGB frames; the red channel of the top-left pixel
    # stores the frame index so tests can tell which frames were selected
    # (the old comment claiming "20" frames and "upper-right" was stale)
    frames = rng.randint(0, 255, size=(n_frames, 6, 8, 3), dtype=np.uint8)
    frames[:, 0, 0, 0] = np.arange(n_frames, dtype=np.uint8)
    return frames


@pytest.fixture
def detections():
    # Frame scores that increase monotonically (nonzero only every 10th frame)
    scores = np.zeros(n_frames, dtype=float)
    scores[np.arange(0, n_frames, 10)] = np.arange(0, n_frames, 10)
    # Include a second score for each frame, always smaller than 1, so that when
    # we consolidate by frame only the first score matters
    scores = np.c_[scores, rng.random(n_frames)]
    boxes = rng.random(size=(n_frames, 4)) * 100
    return [(box, score) for box, score in zip(boxes, scores)]
def test_reduce_selection(mdlite, frames, detections):
    """Object detection finds more than enough frames; the top ones are kept."""
    mdlite.config = MegadetectorLiteYoloXConfig(
        confidence=10, n_frames=5, fill_mode="repeat", sort_by_time=False
    )
    selected = mdlite.filter_frames(frames, detections)
    assert selected[:, 0, 0, 0].tolist() == [90, 80, 70, 60, 50]


def test_repeat(mdlite, frames, detections):
    """Too few confident frames; the gap is filled using `repeat` mode."""
    mdlite.config = MegadetectorLiteYoloXConfig(
        confidence=50, n_frames=5, fill_mode="repeat", sort_by_time=False
    )
    selected = mdlite.filter_frames(frames, detections)
    assert selected[:, 0, 0, 0].tolist() == [90, 80, 70, 60, 80]


def test_score_sorted(mdlite, frames, detections):
    """Too few confident frames; the gap is filled using `score_sorted` mode."""
    mdlite.config = MegadetectorLiteYoloXConfig(
        confidence=50, n_frames=5, fill_mode="score_sorted", sort_by_time=False
    )
    selected = mdlite.filter_frames(frames, detections)
    assert selected[:, 0, 0, 0].tolist() == [90, 80, 70, 60, 50]


def test_sort_by_time(mdlite, frames, detections):
    """With sort_by_time the selected frames come back in frame order."""
    mdlite.config = MegadetectorLiteYoloXConfig(
        confidence=50, n_frames=5, fill_mode="repeat", sort_by_time=True
    )
    selected = mdlite.filter_frames(frames, detections)
    assert selected[:, 0, 0, 0].tolist() == [60, 70, 80, 80, 90]
def test_weighted_euclidean(mdlite, frames, detections):
    # Only a handful of frames clear the confidence cutoff; the rest are filled
    # by the weighted_euclidean mode. Expected values are pinned — presumably
    # deterministic via the config's random seeding; regenerate if seeds change.
    mdlite.config = MegadetectorLiteYoloXConfig(
        confidence=50, n_frames=20, fill_mode="weighted_euclidean", sort_by_time=False
    )
    filtered_frames = mdlite.filter_frames(frames, detections)
    assert (
        filtered_frames[:, 0, 0, 0]
        == np.array(
            [90, 80, 70, 60, 44, 67, 73, 5, 54, 64, 65, 34, 93, 72, 56, 50, 87, 83, 47, 88]
        )
    ).all()


def test_weighted_prob(mdlite, frames, detections):
    # Same setup as above but filling with the weighted_prob mode.
    mdlite.config = MegadetectorLiteYoloXConfig(
        confidence=50, n_frames=20, fill_mode="weighted_prob", sort_by_time=False
    )
    filtered_frames = mdlite.filter_frames(frames, detections)
    assert (
        filtered_frames[:, 0, 0, 0]
        == np.array(
            [90, 80, 70, 60, 50, 87, 30, 40, 34, 10, 22, 20, 71, 16, 39, 14, 77, 65, 42, 13]
        )
    ).all()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_megadetector_lite_yolox.py | tests/test_megadetector_lite_yolox.py | import json
import numpy as np
from PIL import Image
import pytest
import torch
from zamba.object_detection import YoloXModel, YoloXExp, YoloXArgs
from zamba.object_detection.yolox.megadetector_lite_yolox import (
MegadetectorLiteYoloX,
MegadetectorLiteYoloXConfig,
)
from conftest import ASSETS_DIR
@pytest.fixture
def dog():
    """Reference image containing a detectable dog."""
    return Image.open(ASSETS_DIR / "dog.jpg")


@pytest.fixture
def dummy_yolox_path(tmp_path):
    """Write an untrained single-class YOLOX checkpoint and return its path."""
    model = YoloXModel(exp=YoloXExp(num_classes=1), args=YoloXArgs())
    destination = tmp_path / "dummy_yolox.pth"
    torch.save({"model": model.exp.get_model().state_dict()}, destination)
    return destination


@pytest.fixture
def dummy_yolox_model_kwargs(tmp_path):
    """Write the kwargs json that accompanies the dummy YOLOX checkpoint."""
    json_path = tmp_path / "dummy_yolox_kwargs.json"
    with json_path.open("w+") as f:
        json.dump(dict(num_classes=1, image_size=640, backbone="yolox-tiny"), f)
    return json_path
def test_load_megadetector(dummy_yolox_path, dummy_yolox_model_kwargs):
    """Constructing MegadetectorLiteYoloX from explicit files succeeds."""
    MegadetectorLiteYoloX(
        dummy_yolox_path, dummy_yolox_model_kwargs, MegadetectorLiteYoloXConfig()
    )


def test_scale_and_pad_array():
    """scale_and_pad_array grows arrays with zero rows/columns to the target size."""
    source = np.random.randint(0, 256, size=(3, 3, 3), dtype=np.uint8)
    # grow height: the original content stays on top, the new bottom row is zeros
    taller = MegadetectorLiteYoloX.scale_and_pad_array(
        source, output_width=3, output_height=4
    )
    assert taller.shape == (4, 3, 3)
    assert (taller[:3] == source).all()
    assert (taller[3] == np.zeros(3)).all()
    # grow width: the original content stays on the left, new right column is zeros
    wider = MegadetectorLiteYoloX.scale_and_pad_array(
        source, output_width=4, output_height=3
    )
    assert wider.shape == (3, 4, 3)
    assert (wider[:, :3] == source).all()
    assert (wider[:, 3] == np.zeros(3)).all()
def test_detect_image(mdlite, dog):
    # NOTE(review): this shadows the session-scoped `mdlite` fixture with a
    # fresh instance — presumably to get a default config, since other tests
    # mutate mdlite.config in place; confirm, and consider dropping the unused
    # fixture argument.
    mdlite = MegadetectorLiteYoloX()
    boxes, scores = mdlite.detect_image(np.array(dog))
    # exactly one detection, with a stable (relative-coordinate) bounding box
    assert len(scores) == 1
    assert np.allclose([0.65678996, 0.21596366, 0.71104807, 0.277931], boxes[0], atol=1e-3)


def test_detect_video(mdlite, dog):
    # build a 10-frame video showing the dog only on selected frames
    total_frames = 10
    object_frame_indices = [0, 3, 4, 6]
    video = np.zeros([total_frames] + list(dog.size[::-1]) + [3], dtype=np.uint8)
    video[object_frame_indices] = np.array(dog)
    # NOTE(review): shadows the fixture; see test_detect_image above.
    mdlite = MegadetectorLiteYoloX()
    detections = mdlite.detect_video(video)
    # Check that we detected the correct number of frames
    assert sum(len(score) > 0 for _, score in detections) == len(object_frame_indices)
    for frame_index, (frame, (_, score)) in enumerate(zip(video, detections)):
        if len(score) > 0:
            # Frame index is in intended frame indices
            assert frame_index in object_frame_indices
            # No blank frames were selected
            assert not (frame == np.zeros(frame.shape, dtype=np.uint8)).all()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_zamba_video_classification_lightning_module.py | tests/test_zamba_video_classification_lightning_module.py | import pytest
from zamba.models.slowfast_models import SlowFast
from zamba.models.efficientnet_models import TimeDistributedEfficientNet
from conftest import DummyZambaVideoClassificationLightningModule
@pytest.mark.parametrize("model_class", (SlowFast, TimeDistributedEfficientNet))
def test_save_and_load(model_class, tmp_path):
    """Round-trip a video model through to_disk/from_disk; metadata survives."""
    checkpoint = tmp_path / model_class.__name__
    model_class(species=["cat", "dog"]).to_disk(checkpoint)
    restored = model_class.from_disk(checkpoint)
    assert restored.species == ["cat", "dog"]
    assert restored.num_classes == 2
def test_load_dummy(dummy_checkpoint):
    """Loading the dummy checkpoint restores its zero head / ones backbone.

    The previously requested `tmp_path` fixture was unused and has been dropped.
    """
    model = DummyZambaVideoClassificationLightningModule.from_disk(dummy_checkpoint)
    assert (model.head.weight == 0).all()
    assert (model.backbone.weight == 1).all()
def test_save_and_load_trainer_checkpoint(dummy_trainer, tmp_path):
    """A trainer-saved checkpoint restores the frozen backbone and trained head."""
    # Check that frozen backbone did not change during training
    backbone = dummy_trainer.model.model[2]
    assert (backbone.weight == 1).all()
    # Check that model learned something during training
    linear = dummy_trainer.model.model[3]
    assert not (linear.weight == 0).all()
    # Save checkpoint from trainer
    dummy_trainer.save_checkpoint(tmp_path / "dummy.ckpt")
    # Load checkpoint
    model = DummyZambaVideoClassificationLightningModule.from_disk(tmp_path / "dummy.ckpt")
    loaded_backbone = model.model[2]
    assert (loaded_backbone.weight == 1).all()
    # the LOADED backbone must still be frozen (the old code re-checked the
    # in-memory trainer's backbone here, which never exercised the reload path)
    for param in loaded_backbone.parameters():
        assert not param.requires_grad
    loaded_linear = model.model[3]
    assert (loaded_linear.weight == linear.weight).all()
    # the loaded head remains trainable
    for param in loaded_linear.parameters():
        assert param.requires_grad
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_images.py | tests/test_images.py | import json
import logging
import numpy as np
import pandas as pd
import pytest
from PIL import Image
from zamba.images.bbox import BboxInputFormat, bbox_json_to_df, BboxLayout
from zamba.images.classifier import ImageClassifierModule
from zamba.images.config import ImageClassificationTrainingConfig
from zamba.images.data import absolute_bbox
from zamba.images.dataset.dataset import crop_image, prepare_dataset
from zamba.images.manager import train
from zamba.images.result import results_to_megadetector_format
from conftest import ASSETS_DIR, DummyZambaImageClassificationLightningModule
@pytest.fixture
def annotations():
    """Full COCO-style annotations loaded from the bundled labels.json."""
    with open(ASSETS_DIR / "labels.json", "r") as fh:
        return json.load(fh)


@pytest.fixture
def annotation():
    """Single annotation with an absolute-pixel bbox."""
    return {
        "id": "0003",
        "category_id": 6,
        "image_id": "ffe3",
        "bbox": [131, 220, 178, 319],
    }


@pytest.fixture
def dog():
    """Reference image containing a dog."""
    return Image.open(ASSETS_DIR / "dog.jpg")


@pytest.fixture
def trimmed_dog():
    """Pre-cropped version of the dog image used as the expected crop."""
    return Image.open(ASSETS_DIR / "images/crop/dog.jpg")


@pytest.fixture
def labels_path():
    """Path to the image labels csv."""
    return ASSETS_DIR / "images/labels.csv"


@pytest.fixture
def images_path():
    """Root directory that the image labels are relative to."""
    return ASSETS_DIR


@pytest.fixture
def megadetector_output_path():
    """Path to a sample megadetector json output."""
    return ASSETS_DIR / "images/megadetector_output.json"


@pytest.fixture
def dataframe_result_csv_path():
    """Path to a sample predictions dataframe csv."""
    return ASSETS_DIR / "images/df_result.csv"


@pytest.fixture
def splits():
    """Explicit site-to-split assignment."""
    return {"splits": {"train": ["A", "B"], "test": ["C"], "val": ["D"]}}


@pytest.fixture
def categories_map():
    """Identity mapping for six placeholder categories (Test1..Test6)."""
    return {f"Test{i}": f"Test{i}" for i in range(1, 7)}
@pytest.fixture
def dummy_checkpoint(labels_path, tmp_path_factory):
    """Write a dummy image-classifier checkpoint covering the labeled species."""
    out_dir = tmp_path_factory.mktemp("dummy-model-dir")
    species = list(pd.read_csv(labels_path).label.unique())
    checkpoint = out_dir / "dummy.ckpt"
    DummyZambaImageClassificationLightningModule(num_hidden=1, species=species).to_disk(
        checkpoint
    )
    return checkpoint
def test_prepare_dataset_split(annotations, categories_map, splits):
    """prepare_dataset keeps all rows and assigns the configured splits."""
    df = prepare_dataset(
        annotations,
        splits,
        "s3://example.example/example",
        categories_map,
        "example-name",
    )
    assert len(df) == 4
    assert (df["split"] == "train").sum() == 2
    assert (df["split"] == "test").sum() == 1
    assert (df["split"] == "val").sum() == 1


def test_image_annotation_crop_images_the_same_size(annotation, dog, trimmed_dog):
    """Cropping with the annotation bbox matches the reference crop's size."""
    cropped = crop_image(dog, annotation["bbox"])
    assert cropped.height == trimmed_dog.height
    assert cropped.width == trimmed_dog.width
def test_train_config_validate_labels_from_path(labels_path, images_path):
    """Labels supplied as a csv path are parsed into a DataFrame."""
    config = ImageClassificationTrainingConfig(data_dir=images_path, labels=labels_path)
    assert isinstance(config.labels, pd.DataFrame)


def test_train_config_labels(labels_path, images_path):
    """Parsed labels expose the expected `label` column.

    A stray debug `logging.warning(config.labels.head())` was removed.
    """
    config = ImageClassificationTrainingConfig(data_dir=images_path, labels=labels_path)
    assert "label" in config.labels.columns


def test_train_config_data_exist(labels_path, images_path):
    """All five labeled rows survive config validation."""
    config = ImageClassificationTrainingConfig(data_dir=images_path, labels=labels_path)
    assert len(config.labels) == 5
def test_absolute_bbox():
    """Relative XYWH boxes scale to absolute XYXY pixel coordinates."""
    image = Image.fromarray(np.zeros((600, 800)))  # height 600, width 800
    result = absolute_bbox(image, [0.1, 0.25, 0.25, 0.5], bbox_layout=BboxLayout.XYWH)
    assert len(result) == 4
    assert tuple(result) == (80, 150, 280, 450)


def test_absolute_bbox_xy():
    """XYXY boxes that are already absolute pass through unchanged."""
    image = Image.fromarray(np.zeros((600, 800)))
    result = absolute_bbox(image, [200, 200, 400, 400], bbox_layout=BboxLayout.XYXY)
    assert len(result) == 4
    assert tuple(result) == (200, 200, 400, 400)
def test_image_crop_from_megadetector_bbox(dog, trimmed_dog):
    """Cropping with a megadetector-style relative bbox matches the reference crop.

    A stray debug `logging.warning(result)` was removed.
    """
    bbox = absolute_bbox(dog, [0.1718, 0.3836, 0.233, 0.5555], bbox_layout=BboxLayout.XYWH)
    result = dog.crop(bbox)
    assert result.height == trimmed_dog.height
    assert result.width == trimmed_dog.width
@pytest.mark.parametrize("model_class", (ImageClassifierModule,))
def test_save_and_load(model_class, tmp_path):
    # round-trip an image classifier through to_disk/from_disk
    model = model_class(
        species=["cat", "dog"], batch_size=2, image_size=224, model_name="resnet50"
    )
    model.to_disk(tmp_path / model_class.__name__)
    model = model_class.from_disk(tmp_path / model_class.__name__)
    assert model.species == ["cat", "dog"]
    assert model.num_classes == 2


def test_bbox_json_to_df_format_megadetector(megadetector_output_path):
    # megadetector json is flattened into a dataframe with corner columns
    # (x1/y1/x2/y2) plus filepath and label metadata
    with open(megadetector_output_path, "r") as f:
        bbox_json = json.load(f)
    result = bbox_json_to_df(bbox_json, BboxInputFormat.MEGADETECTOR)
    assert len(result) == 2
    assert all(k in result.columns for k in ["x1", "x2", "y1", "y2"])
    assert result.iloc[0]["filepath"] == "path/to/example.jpg"
    assert result.iloc[0]["label"] == "bear"
    assert result.iloc[0]["label_id"] == "3"
    assert result.iloc[1]["label"] == "wolf"
    assert result.iloc[1]["label_id"] == "2"


def test_results_to_megadetector_format(dataframe_result_csv_path):
    # prediction rows are grouped back into per-image detection lists
    df = pd.read_csv(dataframe_result_csv_path)
    species = df.filter(like="species_").columns.tolist()
    result = results_to_megadetector_format(df, species)
    assert len(result.images) == 2
    assert len(result.images[0].detections) == 2
    assert len(result.images[1].detections) == 1
def test_train_integration(images_path, labels_path, dummy_checkpoint, tmp_path):
    """End-to-end training run with the dummy image model writes all artifacts."""
    save_dir = tmp_path / "my_model"
    checkpoint_path = tmp_path / "checkpoints"
    config = ImageClassificationTrainingConfig(
        data_dir=images_path,
        labels=labels_path,
        model_name="dummy",
        max_epochs=1,
        batch_size=1,
        checkpoint=dummy_checkpoint,
        checkpoint_path=checkpoint_path,
        from_scratch=False,
        save_dir=save_dir,
    )
    train(config)
    assert save_dir.exists()
    # config, validation metrics, and the final checkpoint all land in save_dir
    for f in ["train_configuration.yaml", "val_metrics.json", f"{config.model_name}.ckpt"]:
        assert (config.save_dir / f).exists()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/tests/test_npy_cache.py | tests/test_npy_cache.py | from pathlib import Path
import yaml
from zamba.data.video import (
VideoLoaderConfig,
npy_cache,
get_cached_array_path,
load_video_frames,
)
config_yaml = """
cache_dir: local_data/cache
crop_bottom_pixels: 50
early_bias: false
ensure_total_frames: true
evenly_sample_total_frames: false
fps: 4.0
frame_indices: null
frame_selection_height: null
frame_selection_width: null
i_frames: false
megadetector_lite_config:
confidence: 0.25
fill_mode: score_sorted
image_height: 640
image_width: 640
n_frames: 16
nms_threshold: 0.45
seed: 55
sort_by_time: true
model_input_height: 240
model_input_width: 426
pix_fmt: rgb24
scene_threshold: null
total_frames: 16
cleanup_cache: false
cache_dir: data/cache
"""
def test_get_cached_array_path():
    """Cache paths are `<cache_dir>/<config hash>/<video path>.npy`, where the
    hash covers the loader settings but not cache_dir/cleanup_cache."""
    config_dict = yaml.safe_load(config_yaml)
    config = VideoLoaderConfig(**config_dict)
    # NOTE: the validation in VideoLoaderConfig changes some fields,
    # so dict(config) != config_dict
    cached_load_video_frames = npy_cache(
        cache_path=config.cache_dir, cleanup=config.cleanup_cache
    )(load_video_frames)
    # the decorator preserves the wrapped function's type
    assert isinstance(cached_load_video_frames, type(load_video_frames))
    vid_path_str = "data/raw/noemie/Taï_cam197_683044_652175_20161223/01090065.AVI"
    vid_path = Path(vid_path_str)
    expected_cache_path = vid_path.with_suffix(".npy")
    expected_hash = "2d1fee2b1e1f78d06aa08bdea88e7661f927bd81"
    expected = config.cache_dir / expected_hash / expected_cache_path
    # test video path as string or Path
    for video_path in [vid_path_str, vid_path]:
        path = get_cached_array_path(video_path, config)
        assert path == expected
    # pass the cache_dir as a Path
    config_dict = yaml.safe_load(config_yaml)
    config_dict["cache_dir"] = Path(config_dict["cache_dir"])
    config = VideoLoaderConfig(**config_dict)
    path = get_cached_array_path(vid_path, config)
    assert path == expected
    # changing config.cleanup_cache should not affect the key
    config_dict = yaml.safe_load(config_yaml)
    config_dict["cleanup_cache"] = True
    config = VideoLoaderConfig(**config_dict)
    path = get_cached_array_path(vid_path, config)
    assert path == expected
    # changing config.config_dir should change the path but not the hash
    config_dict = yaml.safe_load(config_yaml)
    config_dict["cache_dir"] = "something/else"
    config = VideoLoaderConfig(**config_dict)
    path = get_cached_array_path(vid_path, config)
    expected_different_path = config.cache_dir / expected_hash / expected_cache_path
    assert path == expected_different_path
    # changing anything else should change the key but not the path
    config_dict = yaml.safe_load(config_yaml)
    config_dict["total_frames"] = 8
    config = VideoLoaderConfig(**config_dict)
    path = get_cached_array_path(vid_path, config)
    different_hash = "9becb6d6dfe6b9970afe05af06ef49af4881bd73"
    expected_different_hash = config.cache_dir / different_hash / expected_cache_path
    assert path == expected_different_hash
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
drivendataorg/zamba | https://github.com/drivendataorg/zamba/blob/ce5d28ead22557e412d870780b0dbe3884bbd5cc/docs/generate_api_reference.py | docs/generate_api_reference.py | """Helper script for generating mkdocstrings pages in `docs/docs/api-reference`. Meant
to be run manually once in a while with output committed to repo."""
from pathlib import Path
from textwrap import dedent
from yaml import safe_load
DOCS_DIR = Path(__file__).parent
def generate_file(item: dict):
    """Recursively create a mkdocstrings stub page for one nav entry.

    A nav item maps a title to either a docs-relative page path (str) or a
    list of nested nav items. Existing files are never overwritten.
    """
    key, value = next(iter(item.items()))
    if isinstance(value, str):
        stub = dedent(
            f"""\
            # {key}
            ::: {key}
            """
        )
        target = DOCS_DIR / "docs" / value
        if target.exists():
            print(f"Found existing {target}. Skipping.")
        else:
            print(f"Creating {target} for {key}")
            with target.open("w") as fh:
                fh.write(stub)
    elif isinstance(value, list):
        for child in value:
            generate_file(child)
    else:
        raise ValueError(f"Something is wrong with this navitem: ({key}, {value})")
def main():
    """Generate stub pages for every entry under the 'API Reference' nav section.

    Reads mkdocs.yml next to this script, finds the 'API Reference' nav item,
    ensures the output directory exists, and creates one stub per entry.
    """
    with (DOCS_DIR / "mkdocs.yml").open("r") as fh:
        mkdocs = safe_load(fh)
    # Locate the 'API Reference' nav section; fail loudly with a clear message
    # instead of an UnboundLocalError/NameError if the nav layout changes.
    for nav_item in mkdocs["nav"]:
        if "API Reference" in nav_item:
            api_reference = nav_item
            break
    else:
        raise KeyError("No 'API Reference' section found in mkdocs.yml nav.")
    (DOCS_DIR / "docs" / "api-reference").mkdir(parents=True, exist_ok=True)
    for api_reference_item in api_reference["API Reference"]:
        generate_file(api_reference_item)
if __name__ == "__main__":
main()
| python | MIT | ce5d28ead22557e412d870780b0dbe3884bbd5cc | 2026-01-05T07:12:27.321318Z | false |
ripexz/python-tkinter-minesweeper | https://github.com/ripexz/python-tkinter-minesweeper/blob/fc281a07ffdac8975ddaa7ed49a027c563c5f7a1/minesweeper.py | minesweeper.py | # Python Version 2.7.3
# File: minesweeper.py
from tkinter import *
from tkinter import messagebox as tkMessageBox
from collections import deque
import random
import platform
import time
from datetime import time, date, datetime
SIZE_X = 10
SIZE_Y = 10
STATE_DEFAULT = 0
STATE_CLICKED = 1
STATE_FLAGGED = 2
BTN_CLICK = "<Button-1>"
BTN_FLAG = "<Button-2>" if platform.system() == 'Darwin' else "<Button-3>"
window = None
class Minesweeper:
    """Tkinter minesweeper game.

    Builds a SIZE_X x SIZE_Y grid of tile buttons, places mines at random,
    handles left-click (reveal) and right-click (flag) events, keeps the
    timer/mine/flag labels current, and detects win/lose conditions.
    """

    def __init__(self, tk):
        # import images
        self.images = {
            "plain": PhotoImage(file = "images/tile_plain.gif"),
            "clicked": PhotoImage(file = "images/tile_clicked.gif"),
            "mine": PhotoImage(file = "images/tile_mine.gif"),
            "flag": PhotoImage(file = "images/tile_flag.gif"),
            "wrong": PhotoImage(file = "images/tile_wrong.gif"),
            "numbers": []
        }
        for i in range(1, 9):
            self.images["numbers"].append(PhotoImage(file = "images/tile_"+str(i)+".gif"))

        # set up frame
        self.tk = tk
        self.frame = Frame(self.tk)
        self.frame.pack()

        # set up labels/UI
        self.labels = {
            "time": Label(self.frame, text = "00:00:00"),
            "mines": Label(self.frame, text = "Mines: 0"),
            "flags": Label(self.frame, text = "Flags: 0")
        }
        self.labels["time"].grid(row = 0, column = 0, columnspan = SIZE_Y) # top full width
        self.labels["mines"].grid(row = SIZE_X+1, column = 0, columnspan = int(SIZE_Y/2)) # bottom left
        self.labels["flags"].grid(row = SIZE_X+1, column = int(SIZE_Y/2)-1, columnspan = int(SIZE_Y/2)) # bottom right

        self.restart() # start game
        self.updateTimer() # init timer

    def setup(self):
        """(Re)build the board: reset counters and create a fresh random grid."""
        # create flag and clicked tile variables
        self.flagCount = 0
        self.correctFlagCount = 0
        self.clickedCount = 0
        self.startTime = None

        # create buttons
        self.tiles = dict({})
        self.mines = 0
        for x in range(0, SIZE_X):
            for y in range(0, SIZE_Y):
                if y == 0:
                    self.tiles[x] = {}
                id = str(x) + "_" + str(y)
                isMine = False
                # tile image changeable for debug reasons:
                gfx = self.images["plain"]
                # currently random amount of mines (~10% per tile, so the
                # total count varies from game to game)
                if random.uniform(0.0, 1.0) < 0.1:
                    isMine = True
                    self.mines += 1
                tile = {
                    "id": id,
                    "isMine": isMine,
                    "state": STATE_DEFAULT,
                    "coords": {
                        "x": x,
                        "y": y
                    },
                    "button": Button(self.frame, image = gfx),
                    "mines": 0 # calculated after grid is built
                }
                tile["button"].bind(BTN_CLICK, self.onClickWrapper(x, y))
                tile["button"].bind(BTN_FLAG, self.onRightClickWrapper(x, y))
                tile["button"].grid( row = x+1, column = y ) # offset by 1 row for timer
                self.tiles[x][y] = tile

        # loop again to find nearby mines and display number on tile
        for x in range(0, SIZE_X):
            for y in range(0, SIZE_Y):
                mc = 0
                for n in self.getNeighbors(x, y):
                    mc += 1 if n["isMine"] else 0
                self.tiles[x][y]["mines"] = mc

    def restart(self):
        """Start a new game on a fresh board."""
        self.setup()
        self.refreshLabels()

    def refreshLabels(self):
        """Sync the flag/mine counter labels with the current game state."""
        self.labels["flags"].config(text = "Flags: "+str(self.flagCount))
        self.labels["mines"].config(text = "Mines: "+str(self.mines))

    def gameOver(self, won):
        """Reveal the board, announce the result, and restart or quit."""
        for x in range(0, SIZE_X):
            for y in range(0, SIZE_Y):
                # show wrongly-placed flags and unflagged mines
                if self.tiles[x][y]["isMine"] == False and self.tiles[x][y]["state"] == STATE_FLAGGED:
                    self.tiles[x][y]["button"].config(image = self.images["wrong"])
                if self.tiles[x][y]["isMine"] == True and self.tiles[x][y]["state"] != STATE_FLAGGED:
                    self.tiles[x][y]["button"].config(image = self.images["mine"])

        self.tk.update()

        msg = "You Win! Play again?" if won else "You Lose! Play again?"
        res = tkMessageBox.askyesno("Game Over", msg)
        if res:
            self.restart()
        else:
            self.tk.quit()

    def updateTimer(self):
        """Refresh the elapsed-time label; reschedules itself every 100 ms."""
        ts = "00:00:00"
        if self.startTime != None:
            delta = datetime.now() - self.startTime
            ts = str(delta).split('.')[0] # drop ms
            if delta.total_seconds() < 36000:
                ts = "0" + ts # zero-pad to HH:MM:SS below 10 hours
        self.labels["time"].config(text = ts)
        self.frame.after(100, self.updateTimer)

    def getNeighbors(self, x, y):
        """Return the up-to-8 adjacent tiles of (x, y); edges handled via KeyError."""
        neighbors = []
        coords = [
            {"x": x-1, "y": y-1}, #top left
            {"x": x-1, "y": y},   #top middle
            {"x": x-1, "y": y+1}, #top right
            {"x": x,   "y": y-1}, #left
            {"x": x,   "y": y+1}, #right
            {"x": x+1, "y": y-1}, #bottom left
            {"x": x+1, "y": y},   #bottom middle
            {"x": x+1, "y": y+1}, #bottom right
        ]
        for n in coords:
            try:
                neighbors.append(self.tiles[n["x"]][n["y"]])
            except KeyError:
                # out-of-bounds coordinate (self.tiles is a dict, so negative
                # or too-large indices simply miss)
                pass
        return neighbors

    def onClickWrapper(self, x, y):
        # 'event' is the Tk event object (unused); avoid shadowing tkinter.Button
        return lambda event: self.onClick(self.tiles[x][y])

    def onRightClickWrapper(self, x, y):
        return lambda event: self.onRightClick(self.tiles[x][y])

    def onClick(self, tile):
        """Reveal a tile: lose on a mine, flood-fill on a zero, win when done."""
        if self.startTime == None:
            self.startTime = datetime.now()

        if tile["isMine"] == True:
            # end game
            self.gameOver(False)
            return

        # change image
        if tile["mines"] == 0:
            tile["button"].config(image = self.images["clicked"])
            self.clearSurroundingTiles(tile["id"])
        else:
            tile["button"].config(image = self.images["numbers"][tile["mines"]-1])
        # if not already set as clicked, change state and count
        if tile["state"] != STATE_CLICKED:
            tile["state"] = STATE_CLICKED
            self.clickedCount += 1
        if self.clickedCount == (SIZE_X * SIZE_Y) - self.mines:
            self.gameOver(True)

    def onRightClick(self, tile):
        """Toggle a flag on an unrevealed tile."""
        if self.startTime == None:
            self.startTime = datetime.now()

        # if not clicked
        if tile["state"] == STATE_DEFAULT:
            tile["button"].config(image = self.images["flag"])
            tile["state"] = STATE_FLAGGED
            tile["button"].unbind(BTN_CLICK)
            # if a mine
            if tile["isMine"] == True:
                self.correctFlagCount += 1
            self.flagCount += 1
            self.refreshLabels()
        # if flagged, unflag (use the named state constants, not magic 2/0)
        elif tile["state"] == STATE_FLAGGED:
            tile["button"].config(image = self.images["plain"])
            tile["state"] = STATE_DEFAULT
            tile["button"].bind(BTN_CLICK, self.onClickWrapper(tile["coords"]["x"], tile["coords"]["y"]))
            # if a mine
            if tile["isMine"] == True:
                self.correctFlagCount -= 1
            self.flagCount -= 1
            self.refreshLabels()

    def clearSurroundingTiles(self, id):
        """Breadth-first reveal of the zero-mine region around tile `id`."""
        queue = deque([id])

        while len(queue) != 0:
            key = queue.popleft()
            parts = key.split("_")
            x = int(parts[0])
            y = int(parts[1])

            for tile in self.getNeighbors(x, y):
                self.clearTile(tile, queue)

    def clearTile(self, tile, queue):
        """Reveal one tile during flood-fill; enqueue it if it has no adjacent mines."""
        if tile["state"] != STATE_DEFAULT:
            return

        if tile["mines"] == 0:
            tile["button"].config(image = self.images["clicked"])
            queue.append(tile["id"])
        else:
            tile["button"].config(image = self.images["numbers"][tile["mines"]-1])

        tile["state"] = STATE_CLICKED
        self.clickedCount += 1
### END OF CLASSES ###
def main():
    """Create the Tk root window, attach the game, and enter the event loop."""
    root = Tk()
    root.title("Minesweeper")
    game = Minesweeper(root)  # keep a reference for the lifetime of the loop
    root.mainloop()
if __name__ == "__main__":
main()
| python | MIT | fc281a07ffdac8975ddaa7ed49a027c563c5f7a1 | 2026-01-05T07:12:51.967728Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/train.py | train.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import hydra
import torch
from pathlib import Path
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, open_dict, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.strategies import DDPStrategy
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.utilities.model_summary import summarize
from pytorch_lightning.callbacks import ModelCheckpoint, StochasticWeightAveraging, LearningRateMonitor
from strhub.data.module import SceneTextDataModule
from strhub.models.base import BaseSystem
from strhub.models.utils import create_model
from strhub.dist_utils import copy_remote, is_main_process
@hydra.main(config_path='configs', config_name='main', version_base='1.2')
def main(config: DictConfig):
    """Train a scene-text-recognition system from a hydra config, then run the
    test script on the resulting checkpoints (rank 0 only)."""
    trainer_strategy = None
    with open_dict(config):
        # Resolve absolute path to data.root_dir
        config.data.root_dir = hydra.utils.to_absolute_path(config.data.root_dir)
        # Special handling for GPU-affected config
        gpus = config.trainer.get('gpus', 0)
        if gpus:
            # Use mixed-precision training
            config.trainer.precision = 16
        if gpus > 1:
            # Use DDP
            config.trainer.strategy = 'ddp'
            # DDP optimizations
            trainer_strategy = DDPStrategy(find_unused_parameters=getattr(config.model, "find_unused_parameters", False),
                                           gradient_as_bucket_view=True)
            # Scale steps-based config: each rank sees 1/gpus of the steps
            config.trainer.val_check_interval //= gpus
            if config.trainer.get('max_steps', -1) > 0:
                config.trainer.max_steps //= gpus

    # Special handling for PARseq: mirrored permutations come in pairs
    if config.model.get('perm_mirrored', False):
        assert config.model.perm_num % 2 == 0, 'perm_num should be even if perm_mirrored = True'

    # print config
    rank_zero_info(OmegaConf.to_yaml(config))

    # If specified, use pretrained weights to initialize the model
    if config.pretrained is not None:
        model: BaseSystem = create_model(config.pretrained, True)
    else:
        model: BaseSystem = hydra.utils.instantiate(config.model)
    rank_zero_info(summarize(model, max_depth=1 if model.hparams.name.startswith('parseq') else 2))

    datamodule: SceneTextDataModule = hydra.utils.instantiate(config.data)

    # Keep only the best val_accuracy checkpoint plus last.ckpt
    checkpoint = ModelCheckpoint(monitor='val_accuracy', mode='max', save_top_k=1, save_last=True,
                                 filename='{epoch}-{step}-{val_accuracy:.4f}-{val_NED:.4f}',
                                 every_n_epochs=1)
    lr_monitor = LearningRateMonitor(logging_interval='step')
    callbacks = [lr_monitor, checkpoint]
    if getattr(config, 'swa', False):
        # set swa lrs, scaled by world size and batch size (linear scaling rule)
        swa_epoch_start = 0.8
        lr_scale = math.sqrt(torch.cuda.device_count()) * config.data.batch_size / 256.
        lr = lr_scale * config.model.lr
        if "clip" in config.model.name:
            # CLIP-based models use separate LRs per parameter group
            swa_lrs = [lr, lr, config.model.coef_lr * lr, config.model.coef_lr * lr]
        else:
            swa_lrs = [lr,]
        swa_lrs = [x * (1 - swa_epoch_start) for x in swa_lrs]
        swa = StochasticWeightAveraging(swa_lrs=swa_lrs, swa_epoch_start=swa_epoch_start)
        callbacks.append(swa)

    # When resuming, log into the original run directory instead of a new one
    cwd = HydraConfig.get().runtime.output_dir if config.ckpt_path is None else \
        str(Path(config.ckpt_path).parents[1].absolute())
    trainer: Trainer = hydra.utils.instantiate(config.trainer, logger=TensorBoardLogger(cwd, '', '.'),
                                               strategy=trainer_strategy, enable_model_summary=False,
                                               accumulate_grad_batches=config.trainer.accumulate_grad_batches,
                                               callbacks=callbacks)
    trainer.fit(model, datamodule=datamodule, ckpt_path=config.ckpt_path)

    # copy data and perform test
    # NOTE(review): barrier/destroy assume a process group is initialized,
    # i.e. multi-GPU DDP training — confirm single-GPU runs reach here safely.
    torch.distributed.barrier()
    if is_main_process():
        copy_remote(cwd, config.data.output_url)
        test_call(cwd, config.data.root_dir, config.model.code_path)
    torch.distributed.barrier()
    torch.distributed.destroy_process_group()
@rank_zero_only
def test_call(cwd, data_dir, code_path=None):
    """Report the best validation checkpoint, then evaluate the last one.

    Runs ``test.py`` (located under ``code_path``) as a subprocess against
    ``last.ckpt`` inside ``cwd``/checkpoints.
    """
    script = os.path.join(code_path, 'test.py')
    assert os.path.exists(script)
    print("The execute file is {}".format(script))

    ckpt_dir = os.path.join(cwd, 'checkpoints')
    ckpts = [name for name in os.listdir(ckpt_dir) if 'val' in name]
    # Checkpoint names look like 'epoch=E-step=S-val_accuracy=A-val_NED=N.ckpt';
    # the accuracy sits in the second-to-last '-'-separated field.
    val_acc = [float(name.split('-')[-2].split('=')[-1]) for name in ckpts]
    best_ckpt = os.path.join(ckpt_dir, ckpts[val_acc.index(max(val_acc))])
    print("The best ckpt is {}".format(best_ckpt))
    best_epoch = int(best_ckpt.split('/')[-1].split('-')[0].split('=')[-1])
    print('The val accuracy is best {}-{}e'.format(max(val_acc), best_epoch))

    # Only the last checkpoint is evaluated; best-checkpoint and CLIP-refined
    # evaluations are intentionally disabled.
    print("\n Test results with the last checkpoint")
    last_ckpt = os.path.join(ckpt_dir, "last.ckpt")
    os.system("python {} {} --data_root {} --new".format(script, last_ckpt, data_dir))
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/hubconf.py | hubconf.py | from strhub.models.utils import create_model
dependencies = ['torch', 'pytorch_lightning', 'timm']
def parseq_tiny(pretrained: bool = False, decode_ar: bool = True, refine_iters: int = 1, **kwargs):
    """Build PARSeq-tiny (img_size=128x32, patch_size=8x4, d_model=192).

    Args:
        pretrained: load pretrained weights.
        decode_ar: use autoregressive decoding.
        refine_iters: number of iterative-refinement passes.
    """
    model_id = 'parseq-tiny'
    return create_model(model_id, pretrained, decode_ar=decode_ar, refine_iters=refine_iters, **kwargs)
def parseq(pretrained: bool = False, decode_ar: bool = True, refine_iters: int = 1, **kwargs):
    """Build the PARSeq base model (img_size=128x32, patch_size=8x4, d_model=384).

    Args:
        pretrained: load pretrained weights.
        decode_ar: use autoregressive decoding.
        refine_iters: number of iterative-refinement passes.
    """
    model_id = 'parseq'
    return create_model(model_id, pretrained, decode_ar=decode_ar, refine_iters=refine_iters, **kwargs)
def abinet(pretrained: bool = False, iter_size: int = 3, **kwargs):
    """Build ABINet (img_size=128x32).

    Args:
        pretrained: load pretrained weights.
        iter_size: number of iterative-refinement passes.
    """
    model_id = 'abinet'
    return create_model(model_id, pretrained, iter_size=iter_size, **kwargs)
def trba(pretrained: bool = False, **kwargs):
    """Build TRBA (img_size=128x32).

    Args:
        pretrained: load pretrained weights.
    """
    model_id = 'trba'
    return create_model(model_id, pretrained, **kwargs)
def vitstr(pretrained: bool = False, **kwargs):
    """Build ViTSTR small (img_size=128x32, patch_size=8x4, d_model=384).

    Args:
        pretrained: load pretrained weights.
    """
    model_id = 'vitstr'
    return create_model(model_id, pretrained, **kwargs)
def crnn(pretrained: bool = False, **kwargs):
    """Build CRNN (img_size=128x32).

    Args:
        pretrained: load pretrained weights.
    """
    model_id = 'crnn'
    return create_model(model_id, pretrained, **kwargs)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/tune.py | tune.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import shutil
from pathlib import Path
from omegaconf import DictConfig, open_dict
import hydra
from hydra.core.hydra_config import HydraConfig
import numpy as np
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.loggers import TensorBoardLogger
from ray import tune
from ray.tune import CLIReporter
from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.schedulers import MedianStoppingRule
from ray.tune.suggest.ax import AxSearch
from strhub.data.module import SceneTextDataModule
from strhub.models.base import BaseSystem
log = logging.getLogger(__name__)
class MetricTracker(tune.Stopper):
    """Tracks the trend of the metric. Stops downward/stagnant trials. Assumes metric is being maximized."""

    def __init__(self, metric, max_t, patience: int = 3, window: int = 3) -> None:
        super().__init__()
        self.metric = metric  # name of the result key to track
        self.trial_history = {}  # per-trial FIFO of recent metric values
        self.max_t = max_t  # hard cap on training iterations per trial
        self.training_iteration = 0  # last seen training_iteration (any trial)
        self.eps = 0.01 # sensitivity
        self.patience = patience # number of consecutive downward/stagnant samples to trigger early stoppage.
        # Gaussian smoothing kernel (window taps, centered) for the moving average.
        self.kernel = self.gaussian_pdf(np.arange(window) - window // 2, sigma=0.6)
        # Extra samples to keep in order to have better MAs + gradients for the middle p samples.
        self.buffer = 2 * (len(self.kernel) // 2) + 2

    @staticmethod
    def gaussian_pdf(x, sigma=1.):
        # Gaussian probability density evaluated at x.
        return np.exp(-(x / sigma)**2 / 2) / (sigma * np.sqrt(2 * np.pi))

    @staticmethod
    def moving_average(x, k):
        # Kernel-weighted moving average of x ('valid' mode: no edge padding).
        return np.convolve(x, k, 'valid') / k.sum()

    def __call__(self, trial_id, result):
        # Return True to stop this trial.
        self.training_iteration = result['training_iteration']
        # Stop immediately on NaN loss or once the iteration budget is spent.
        if np.isnan(result['loss']) or self.training_iteration >= self.max_t:
            try:
                del self.trial_history[trial_id]
            except KeyError:
                pass
            return True
        history = self.trial_history.get(trial_id, [])
        # FIFO queue of metric values.
        history = history[-(self.patience + self.buffer - 1):] + [result[self.metric]]
        # Only start checking once we have enough data. At least one non-zero sample is required.
        if len(history) == self.patience + self.buffer and sum(history) > 0:
            smooth_grad = np.gradient(self.moving_average(history, self.kernel))[1:-1] # discard edge values.
            # Check if trend is downward or stagnant
            if (smooth_grad < self.eps).all():
                log.info(f'Stopping trial = {trial_id}, hist = {history}, grad = {smooth_grad}')
                try:
                    del self.trial_history[trial_id]
                except KeyError:
                    pass
                return True
        self.trial_history[trial_id] = history
        return False

    def stop_all(self):
        # Never stop the whole experiment; only individual trials.
        return False
class TuneReportCheckpointPruneCallback(TuneReportCheckpointCallback):
    """TuneReportCheckpointCallback variant that keeps only the newest checkpoint on disk."""

    def _handle(self, trainer: Trainer, pl_module: LightningModule):
        # Save the new checkpoint first, then delete all but the most recent.
        self._checkpoint._handle(trainer, pl_module)
        # Prune older checkpoints (sorted by modification time; newest kept).
        for old in sorted(Path(tune.get_trial_dir()).glob('checkpoint_epoch=*-step=*'), key=os.path.getmtime)[:-1]:
            log.info(f'Deleting old checkpoint: {old}')
            shutil.rmtree(old)
        # Report metrics only after pruning, so Tune sees a consistent trial dir.
        self._report._handle(trainer, pl_module)
def train(hparams, config, checkpoint_dir=None):
    """Single Ray Tune trial: train the system with the sampled hyperparameters."""
    # Inject the sampled learning rate into the (struct-locked) hydra config.
    with open_dict(config):
        config.model.lr = hparams['lr']
    system: BaseSystem = hydra.utils.instantiate(config.model)
    data: SceneTextDataModule = hydra.utils.instantiate(config.data)
    metrics = {'loss': 'val_loss', 'NED': 'val_NED', 'accuracy': 'val_accuracy'}
    report_cb = TuneReportCheckpointPruneCallback(metrics)
    if checkpoint_dir is None:
        ckpt_path = None
    else:
        ckpt_path = os.path.join(checkpoint_dir, 'checkpoint')
    trial_logger = TensorBoardLogger(save_dir=tune.get_trial_dir(), name='', version='.')
    trainer: Trainer = hydra.utils.instantiate(config.trainer, enable_progress_bar=False,
                                               enable_checkpointing=False, logger=trial_logger,
                                               callbacks=[report_cb])
    trainer.fit(system, datamodule=data, ckpt_path=ckpt_path)
@hydra.main(config_path='configs', config_name='tune', version_base='1.2')
def main(config: DictConfig):
    """Hyperparameter search (learning rate) with Ray Tune + Ax over a hydra config."""
    # Special handling for PARseq: mirrored permutations come in pairs
    if config.model.get('perm_mirrored', False):
        assert config.model.perm_num % 2 == 0, 'perm_num should be even if perm_mirrored = True'

    # Modify config
    with open_dict(config):
        # Use mixed-precision training
        if config.trainer.get('gpus', 0):
            config.trainer.precision = 16
        # Resolve absolute path to data.root_dir
        config.data.root_dir = hydra.utils.to_absolute_path(config.data.root_dir)

    # Search space: learning rate sampled log-uniformly.
    hparams = {
        'lr': tune.loguniform(config.tune.lr.min, config.tune.lr.max),
        # 'wd': tune.loguniform(config.tune.wd.min, config.tune.wd.max),
    }

    # Convert epoch-based budget into validation-step units used by the stoppers.
    steps_per_epoch = len(hydra.utils.instantiate(config.data).train_dataloader())
    val_steps = steps_per_epoch * config.trainer.max_epochs / config.trainer.val_check_interval
    max_t = round(0.75 * val_steps)  # max validation steps per trial
    warmup_t = round(config.model.warmup_pct * val_steps)  # grace period before stopping
    scheduler = MedianStoppingRule(time_attr='training_iteration', grace_period=warmup_t)

    # Always start by evenly dividing the range in log scale.
    lr = hparams['lr']
    start = np.log10(lr.lower)
    stop = np.log10(lr.upper)
    num = math.ceil(stop - start) + 1
    initial_points = [{'lr': np.clip(x, lr.lower, lr.upper).item()} for x in reversed(np.logspace(start, stop, num))]
    search_alg = AxSearch(points_to_evaluate=initial_points)
    reporter = CLIReporter(
        parameter_columns=['lr'],
        metric_columns=['loss', 'accuracy', 'training_iteration'])

    # Reuse the previous output directory when resuming a search.
    out_dir = Path(HydraConfig.get().runtime.output_dir if config.tune.resume_dir is None else config.tune.resume_dir)
    analysis = tune.run(
        tune.with_parameters(train, config=config),
        name=out_dir.name,
        metric='NED',
        mode='max',
        stop=MetricTracker('NED', max_t),
        config=hparams,
        resources_per_trial={
            'cpu': 1,
            'gpu': config.tune.gpus_per_trial
        },
        num_samples=config.tune.num_samples,
        local_dir=str(out_dir.parent.absolute()),
        search_alg=search_alg,
        scheduler=scheduler,
        progress_reporter=reporter,
        resume=config.tune.resume_dir is not None,
        trial_executor=RayTrialExecutor(result_buffer_length=0)  # disable result buffering
    )

    print('Best hyperparameters found were: ', analysis.best_config)
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/bench.py | bench.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils import benchmark
from fvcore.nn import FlopCountAnalysis, ActivationCountAnalysis, flop_count_table
import hydra
from omegaconf import DictConfig
@torch.inference_mode()
@hydra.main(config_path='configs', config_name='bench', version_base='1.2')
def main(config: DictConfig):
    """Benchmark inference latency (and FLOPs/activations) of a configured model
    on a single random input image."""
    # For consistent behavior: force deterministic kernels
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
    torch.backends.cudnn.benchmark = False
    torch.use_deterministic_algorithms(True)
    device = config.get('device', 'cuda')

    h, w = config.data.img_size
    x = torch.rand(1, 3, h, w, device=device)  # single dummy RGB image
    model = hydra.utils.instantiate(config.model).eval().to(device)

    if config.get('range', False):
        # Sweep the max label length passed as the model's second argument.
        for i in range(1, 26, 4):
            timer = benchmark.Timer(
                stmt='model(x, len)',
                globals={'model': model, 'x': x, 'len': i})
            print(timer.blocked_autorange(min_run_time=1))
    else:
        timer = benchmark.Timer(
            stmt='model(x)',
            globals={'model': model, 'x': x})
        flops = FlopCountAnalysis(model, x)
        acts = ActivationCountAnalysis(model, x)
        print(timer.blocked_autorange(min_run_time=1))
        print(flop_count_table(flops, 1, acts, False))
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/test.py | test.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import string
import argparse
from dataclasses import dataclass
from typing import List
import torch
from tqdm import tqdm
from strhub.data.module import SceneTextDataModule
from strhub.models.utils import load_from_checkpoint, parse_model_args
@dataclass
class Result:
    """Aggregated evaluation metrics for a single test dataset."""
    dataset: str  # dataset name
    num_samples: int  # number of evaluated samples
    accuracy: float  # word accuracy (%)
    ned: float  # 1 - normalized edit distance (%)
    confidence: float  # mean prediction confidence (%)
    label_length: float  # mean ground-truth label length
def print_results_table(results: List["Result"], file=None):
    """Print per-dataset results as a Markdown table plus a sample-weighted
    'Combined' summary row.

    Args:
        results: per-dataset metric records (objects with ``dataset``,
            ``num_samples``, ``accuracy``, ``ned``, ``confidence`` and
            ``label_length`` attributes).
        file: target stream for ``print`` (default: stdout).

    Handles an empty result list gracefully (header plus an all-zero Combined
    row) instead of crashing on ``max()`` of an empty sequence / ZeroDivisionError.
    """
    # Column width: widest dataset name, never narrower than the header labels.
    w = max((len(res.dataset) for res in results), default=0)
    w = max(w, len('Dataset'), len('Combined'))
    print('| {:<{w}} | # samples | Accuracy | 1 - NED | Confidence | Label Length |'.format('Dataset', w=w), file=file)
    print('|:{:-<{w}}:|----------:|---------:|--------:|-----------:|-------------:|'.format('----', w=w), file=file)
    # Sample-weighted accumulators for the Combined row.
    total = 0
    acc_sum = ned_sum = conf_sum = len_sum = 0.0
    for res in results:
        total += res.num_samples
        acc_sum += res.num_samples * res.accuracy
        ned_sum += res.num_samples * res.ned
        conf_sum += res.num_samples * res.confidence
        len_sum += res.num_samples * res.label_length
        print(f'| {res.dataset:<{w}} | {res.num_samples:>9} | {res.accuracy:>8.2f} | {res.ned:>7.2f} '
              f'| {res.confidence:>10.2f} | {res.label_length:>12.2f} |', file=file)
    denom = total if total else 1  # avoid ZeroDivisionError on empty input
    print('|-{:-<{w}}-|-----------|----------|---------|------------|--------------|'.format('----', w=w), file=file)
    print(f'| {"Combined":<{w}} | {total:>9} | {acc_sum / denom:>8.2f} | {ned_sum / denom:>7.2f} '
          f'| {conf_sum / denom:>10.2f} | {len_sum / denom:>12.2f} |', file=file)
@torch.inference_mode()
def main():
    """Evaluate a checkpoint on the STR benchmark datasets and write a results log.

    Parses CLI args, optionally resolves a checkpoint directory to its best
    validation checkpoint, evaluates each test dataloader, and prints/saves
    per-dataset and combined metrics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('checkpoint', help="Model checkpoint (or 'pretrained=<model_id>')")
    parser.add_argument('--data_root', default='data')
    parser.add_argument('--batch_size', type=int, default=512)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--cased', action='store_true', default=False, help='Cased comparison')
    parser.add_argument('--punctuation', action='store_true', default=False, help='Check punctuation')
    parser.add_argument('--new', action='store_true', default=False, help='Evaluate on new benchmark datasets')
    parser.add_argument('--rotation', type=int, default=0, help='Angle of rotation (counter clockwise) in degrees.')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--clip_model_path', type=str,
                        default="path/to/clip/model",
                        help='path to the clip model')
    parser.add_argument('--clip_refine', action='store_true', default=False,
                        help='use clip to refine the predicted results')
    parser.add_argument('--sample_K', type=int, default=5,
                        help='K of top-K when performing CLIP post-refinement')
    parser.add_argument('--sample_K2', type=int, default=5,
                        help='K of top-K choosed predictions when performing CLIP post-refinement')
    parser.add_argument('--sample_total', type=int, default=50,
                        help='the number of samples when sample from the predicted probability distribution')
    parser.add_argument('--sample_prompt', type=str, default=None,
                        help='prompt for CLIP')
    parser.add_argument('--alpha', type=float, default=0.1,
                        help='prompt for CLIP')
    args, unknown = parser.parse_known_args()
    kwargs = parse_model_args(unknown)
    print(args)

    # If a directory was given, pick the checkpoint with the highest val accuracy
    # (filenames look like 'epoch=E-step=S-val_accuracy=A-val_NED=N.ckpt').
    if os.path.isdir(args.checkpoint):
        ckpts = [x for x in os.listdir(args.checkpoint) if 'val' in x]
        assert len(ckpts) >= 1
        val_acc = [float(x.split('-')[-2].split('=')[-1]) for x in ckpts]
        best_ckpt = os.path.join(args.checkpoint, ckpts[val_acc.index(max(val_acc))])
        best_epoch = int(best_ckpt.split('/')[-1].split('-')[0].split('=')[-1])
        print('The val accuracy is best {}-{}e'.format(max(val_acc), best_epoch))
        args.checkpoint = best_ckpt

    # Build the evaluation charset from the CLI flags (36-char by default).
    charset_test = string.digits + string.ascii_lowercase
    if args.cased:
        charset_test += string.ascii_uppercase
    if args.punctuation:
        charset_test += string.punctuation
    kwargs.update({'charset_test': charset_test})
    print(f'Additional keyword arguments: {kwargs}')

    print('load weights from checkpoint {}'.format(args.checkpoint))
    model = load_from_checkpoint(args.checkpoint, **kwargs).eval().to(args.device)
    hp = model.hparams
    datamodule = SceneTextDataModule(args.data_root, '_unused_', hp.img_size, hp.max_label_length, hp.charset_train,
                                     hp.charset_test, args.batch_size, args.num_workers, False, rotation=args.rotation)

    test_set = SceneTextDataModule.TEST_BENCHMARK_SUB + SceneTextDataModule.TEST_BENCHMARK
    if args.new:
        test_set += SceneTextDataModule.TEST_NEW
    test_set = sorted(set(test_set))  # de-duplicate overlapping benchmark lists

    torch.cuda.synchronize()
    start_time = time.time()
    results = {}
    max_width = max(map(len, test_set))
    all_total = 0
    for name, dataloader in datamodule.test_dataloaders(test_set).items():
        # Accumulate raw counts per dataset, then convert to percentages.
        total = 0
        correct = 0
        ned = 0
        confidence = 0
        label_length = 0
        for imgs, labels in tqdm(iter(dataloader), desc=f'{name:>{max_width}}'):
            res = model.test_step((imgs.to(model.device), labels), -1,
                                  clip_model_path=args.clip_model_path,
                                  clip_refine=args.clip_refine,
                                  sample_K=args.sample_K,
                                  sample_K2=args.sample_K2,
                                  sample_total=args.sample_total,
                                  sample_prompt=args.sample_prompt,
                                  alpha=args.alpha
                                  )['output']
            total += res.num_samples
            correct += res.correct
            ned += res.ned
            confidence += res.confidence
            label_length += res.label_length
        all_total += total
        accuracy = 100 * correct / total
        mean_ned = 100 * (1 - ned / total)
        mean_conf = 100 * confidence / total
        mean_label_length = label_length / total
        results[name] = Result(name, total, accuracy, mean_ned, mean_conf, mean_label_length)

    result_groups = {
        'Benchmark (Subset)': SceneTextDataModule.TEST_BENCHMARK_SUB,
        'Benchmark': SceneTextDataModule.TEST_BENCHMARK
    }
    if args.new:
        result_groups.update({'New': SceneTextDataModule.TEST_NEW})
    torch.cuda.synchronize()
    total_time = time.time() - start_time
    # Log filename encodes the evaluation settings so runs don't overwrite each other.
    if args.clip_refine:
        log_filename = args.checkpoint + '.log_K{}-{}-{}_prompt{}_alpha{}_new{}.txt'.format(
            args.sample_K2, args.sample_K, args.sample_total, args.sample_prompt, args.alpha, str(args.new))
    else:
        log_filename = args.checkpoint + '.log_new{}.txt'.format(str(args.new))
    # Write the tables both to the log file and to stdout.
    with open(log_filename, 'w') as f:
        for out in [f, sys.stdout]:
            for group, subset in result_groups.items():
                print(f'{group} set:', file=out)
                print_results_table([results[s] for s in subset], out)
                print('\n', file=out)
            print("Time: Total {}s, Average {}ms. Total samples {}.".format(total_time, total_time * 1000.0 / all_total, all_total), file=out)
if __name__ == '__main__':
main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/read.py | read.py | #!/usr/bin/env python3
# Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import torch
from PIL import Image
from strhub.data.module import SceneTextDataModule
from strhub.models.utils import load_from_checkpoint, parse_model_args
@torch.inference_mode()
def main():
    """Run scene-text-recognition inference on every image in --images_path.

    Loads the model from a checkpoint, applies the model's own preprocessing
    transform, and prints one "<filename>: <prediction>" line per image.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('checkpoint', help="Model checkpoint (or 'pretrained=<model_id>')")
    parser.add_argument('--images_path', type=str, help='Images to read')
    parser.add_argument('--device', default='cuda')
    args, unknown = parser.parse_known_args()
    # Unrecognized CLI flags are forwarded to the model loader as kwargs.
    kwargs = parse_model_args(unknown)
    print(f'Additional keyword arguments: {kwargs}')

    model = load_from_checkpoint(args.checkpoint, **kwargs).eval().to(args.device)
    # Preprocessing must match the image size the checkpoint was trained with.
    img_transform = SceneTextDataModule.get_transform(model.hparams.img_size)

    files = sorted([x for x in os.listdir(args.images_path) if x.endswith('png') or x.endswith('jpeg') or x.endswith('jpg')])
    for fname in files:
        # Load image and prepare for input
        filename = os.path.join(args.images_path, fname)
        image = Image.open(filename).convert('RGB')
        image = img_transform(image).unsqueeze(0).to(args.device)  # add batch dim

        # Per-position class probabilities, then greedy decode via the tokenizer.
        p = model(image).softmax(-1)
        pred, p = model.tokenizer.decode(p)
        print(f'{fname}: {pred[0]}')


if __name__ == '__main__':
    main()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/dist_utils.py | strhub/dist_utils.py | # coding=utf-8
import os
import logging
import torch.distributed as dist
from pytorch_lightning.utilities import rank_zero_only
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of processes in the default group; 1 outside distributed runs."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Global rank of this process; 0 outside distributed runs."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True only for the (global) rank-0 process."""
    rank = get_rank()
    return rank == 0
def setup_logger(log_file=None, include_host=False):
    """Configure root logging: INFO on rank 0, WARN elsewhere.

    Installs a stream handler (and a file handler when `log_file` is given),
    and pushes the chosen level onto every already-registered logger.
    Ref: https://github.com/mlfoundations/open_clip/blob/db338b0bb36c15ae12fcd37e86120414903df1ef/src/training/logger.
    """
    datefmt = '%Y-%m-%d,%H:%M:%S'
    if include_host:
        import socket
        hostname = socket.gethostname()
        fmt = f'%(asctime)s | {hostname} | %(levelname)s | %(message)s'
    else:
        fmt = '%(asctime)s | %(levelname)s | %(message)s'
    formatter = logging.Formatter(fmt, datefmt=datefmt)

    # Only the main process logs at INFO; workers stay quiet at WARN.
    level = logging.INFO if is_main_process() else logging.WARN
    logging.root.setLevel(level)
    for name in logging.root.manager.loggerDict:
        logging.getLogger(name).setLevel(level)

    handlers = [logging.StreamHandler()]
    if log_file:
        handlers.append(logging.FileHandler(filename=log_file))
    for handler in handlers:
        handler.setFormatter(formatter)
        logging.root.addHandler(handler)
@rank_zero_only
def copy_remote(local_dir, remote_dir=None):
    """Tar up `local_dir` and rsync the archive to `remote_dir`.

    Runs only on the rank-0 process (pytorch_lightning's rank_zero_only).
    No-op when `remote_dir` is None. Uses shell commands via os.system, so
    both paths must be shell-safe; also note this chdir's the process to the
    parent of `local_dir` as a side effect.
    """
    if remote_dir is not None:
        print('=' * 50)
        # cd into local_dir then up one level, so the tar archive gets a
        # clean top-level folder named after local_dir.
        os.chdir(local_dir)
        os.chdir('..')
        base_name = os.path.basename(local_dir)
        tar_filename = '{}.tar'.format(base_name)
        if os.path.exists(os.path.join(os.getcwd(), tar_filename)):
            print('remove existing tarfile and create a new tarfile')
            os.system("rm {}".format(tar_filename))
        os.system("tar -zcvf {} {}".format(tar_filename, base_name))
        os.system("rsync -rvP {} {}".format(tar_filename, remote_dir))
        os.system("ls -lah {}".format(remote_dir))
        print("Copy success!")
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/__init__.py | strhub/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/model.py | strhub/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
    """ResNet bottleneck block as modified for CLIP's ModifiedResNet.

    Differences from torchvision's Bottleneck: every convolution has stride 1,
    and spatial downsampling (when stride > 1) is done by an AvgPool2d after
    conv2 — and inside the shortcut — i.e. anti-aliased pooling.
    """

    expansion = 4  # channel expansion factor of the final 1x1 conv

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        # Shortcut needed whenever spatial size or channel count changes.
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        # residual connection, then final activation
        out += identity
        out = self.relu3(out)
        return out
class AttentionPool2d(nn.Module):
    """Global pooling of a 2D feature map via multi-head attention.

    The query is the spatial mean of the feature map; keys/values are all
    spatial positions (plus that mean token), so each image is pooled to a
    single embedding of size `output_dim` (or `embed_dim` if not given).
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # +1 position for the prepended mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.flatten(start_dim=2).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Only the mean token (x[:1]) queries, yielding one pooled vector.
        x, _ = F.multi_head_attention_forward(
            query=x[:1], key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        return x.squeeze(0)
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem (overall stride 4: conv1 stride 2 + avgpool 2)
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        # Total stride is 32, hence the (input_resolution // 32) spatial grid.
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        """Build one residual stage: first block may downsample, rest keep size."""
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            x = self.relu1(self.bn1(self.conv1(x)))
            x = self.relu2(self.bn2(self.conv2(x)))
            x = self.relu3(self.bn3(self.conv3(x)))
            x = self.avgpool(x)
            return x

        # Cast input to the weight dtype (fp16 after convert_weights).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x
class LayerNorm(nn.LayerNorm):
    """LayerNorm that always normalizes in float32, then casts back.

    Keeps fp16 activations numerically stable while preserving the input dtype.
    """

    def forward(self, x: torch.Tensor):
        original_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(original_dtype)
class QuickGELU(nn.Module):
    """Cheap GELU approximation used by CLIP: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: x + attn(LN(x)), then x + MLP(LN(x)).

    Input/output layout is LND (sequence-first), as required by
    nn.MultiheadAttention's default.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x-expansion MLP with QuickGELU, as in the original CLIP.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        # Keep the (optional) additive mask on the input's dtype/device.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """A stack of `layers` residual attention blocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = (ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers))
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        # Input/output layout is LND (sequence-first).
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP's ViT image encoder.

    Unlike the original OpenAI code, `forward` can return either the
    CLS-token embedding (cls=True) or the features of *all* tokens
    (cls=False), and the final projection can be skipped (projection=False).
    """

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Patchify via a non-overlapping patch_size x patch_size convolution.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # One position per patch plus one for the class token.
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor, cls=False, projection=True):
        """
        Args:
            cls: if False, return all features of all tokens
            projection: if False, skip the final projection to output_dim
        """
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the (broadcast) class token to every sequence in the batch.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        if cls:
            x = self.ln_post(x[:, 0, :])
            if self.proj is not None and projection:
                x = x @ self.proj
        else:
            # NEW ADDED CODE
            # here we need to output all tokens
            x = self.ln_post(x)
            if self.proj is not None and projection:
                x = torch.matmul(x, self.proj)

        return x
class CLIP(nn.Module):
    """Contrastive image/text model (OpenAI CLIP), lightly modified.

    Modifications vs. the reference implementation: `encode_image` /
    `encode_text` can return all token features (not just the pooled one),
    optionally without the final projection, and the text positional
    embedding is sliced to the actual sequence length.
    """

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        # A tuple/list of layer counts selects the ResNet tower; an int selects ViT.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable temperature, initialized to 1/0.07 as in the CLIP paper.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embeddings/projections following the reference CLIP code."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            # Zero-init the last BN gamma in each block so blocks start as identity.
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # Dtype of the visual tower's first conv doubles as the model dtype.
        return self.visual.conv1.weight.dtype

    def encode_image(self, image, cls=False, projection=True):
        """Encode images; cls=True returns only the pooled CLS embedding."""
        return self.visual(image.type(self.dtype), cls=cls, projection=projection)

    def encode_text(self, text, eot=False, projection=True):
        """Encode token ids; eot=True pools at the end-of-text position."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        # x = x + self.positional_embedding.type(self.dtype)
        # Slice the positional embedding to the actual sequence length.
        pos_emd = self.positional_embedding[:x.size(1), :]
        x = x + pos_emd

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        if eot:
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            if projection:
                x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
            else:
                x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)]
        else:
            if projection:
                x = torch.matmul(x, self.text_projection)

        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) for a contrastive batch."""
        # NOTE: original CLIP return index 0 token, but we return all tokens
        image_features = self.encode_image(image, cls=True)
        text_features = self.encode_text(text, eot=True)

        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""

    def _to_half(module):
        # Conv/Linear layers: cast weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # MultiheadAttention stores its projections as plain attributes.
        if isinstance(module, nn.MultiheadAttention):
            proj_names = [f"{prefix}_proj_weight" for prefix in ("in", "q", "k", "v")]
            for attr_name in proj_names + ["in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-specific projection parameters that live directly on modules.
        for attr_name in ("text_projection", "proj"):
            attr = getattr(module, attr_name, None)
            if attr is not None:
                attr.data = attr.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Instantiate a CLIP model whose architecture is inferred from `state_dict`.

    Tensor shapes in the checkpoint determine the tower type (ViT vs. ResNet),
    layer counts, widths, patch size, and resolutions. Weights are converted
    to fp16 and the loaded model is returned in eval mode.
    """
    # ViT checkpoints carry a visual projection matrix; ResNets do not.
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid**2 + 1 entries (CLS token included).
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet: count distinct block indices per stage to recover the layout.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_resolution = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )

    # JIT archives store these metadata entries; they are not parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]

    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/simple_tokenizer.py | strhub/clip/simple_tokenizer.py | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
    """Path to the bundled BPE vocabulary, resolved relative to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
    """
    Returns a bijective mapping from every utf-8 byte value (0..255) to a
    printable unicode character, so BPE can operate on "visible" strings.
    Bytes that already map to printable latin characters keep their own
    character; the remaining bytes are assigned chr(256), chr(257), ... in
    ascending byte order. Insertion order (printables first, then the shifted
    bytes) is preserved, which downstream vocab construction relies on.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {b: chr(b) for b in printable}
    shift = 0
    for b in range(2 ** 8):
        if b not in mapping:
            mapping[b] = chr(2 ** 8 + shift)
            shift += 1
    return mapping
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length
    strings). Generalized to handle empty and single-symbol words (both
    return an empty set) instead of raising IndexError on empty input.
    """
    # zip(word, word[1:]) yields each adjacent (prev, next) pair.
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Repair mojibake with ftfy, undo (possibly double) HTML escaping, strip."""
    fixed = ftfy.fix_text(text)
    fixed = html.unescape(html.unescape(fixed))
    return fixed.strip()
def whitespace_clean(text):
    """Collapse runs of whitespace into single spaces and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer.

    Text is mapped to "visible" unicode bytes (bytes_to_unicode), split by a
    regex into word-ish chunks, and each chunk is merged greedily according
    to the learned BPE merge ranks.

    Fixes vs. the original: the bare `except:` in `bpe` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to the ValueError that
    tuple.index actually raises, and the gzip handle is closed deterministically.
    """

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # Close the gzip handle deterministically (original leaked it).
        with gzip.open(bpe_path) as f:
            merges = f.read().decode("utf-8").split('\n')
        # First line is a version header; the tail slots are reserved for the
        # 256 byte tokens (x2 for '</w>' variants) and 2 special tokens.
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank = higher merge priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

    def bpe(self, token):
        """Apply BPE merges to one token; returns space-separated subwords."""
        if token in self.cache:
            return self.cache[token]
        # Mark the word boundary on the final symbol.
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            # Merge the best-ranked adjacent pair; stop when none is known.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # `first` no longer occurs from position i on: copy the tail.
                    new_word.extend(word[i:])
                    break
                new_word.extend(word[i:j])
                i = j

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean `text` and return the list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Invert `encode`: token ids back to (approximately) the original text."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/__init__.py | strhub/clip/__init__.py | from .clip import *
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/clip/clip.py | strhub/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
# Checkpoints from OpenCLIP
"OpenCLIP-ViT-B/16": "https://huggingface.co/laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K/blob/main/open_clip_pytorch_model.bin",
# SHA256: 80905176486d952914c1deb3b753b8e653ab54aa4ffb14208bc5007cf7643a16
"OpenCLIP-ViT-L/14": "https://huggingface.co/laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K/blob/main/open_clip_pytorch_model.bin",
# SHA256: 6509f07e6fc0da68f8e1ee881bf90803f0b053d2f7ed2013cc7c3a49ac4dd3db
"OpenCLIP-ViT-H/14": "https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/blob/main/open_clip_pytorch_model.bin",
# SHA256: 9a78ef8e8c73fd0df621682e7a8e8eb36c6916cb3c16b291a082ecd52ab79cc4
"appleDFN5B-CLIP-ViT-H-14": "https://huggingface.co/apple/DFN5B-CLIP-ViT-H-14/blob/main/open_clip_pytorch_model.bin",
# SHA256: d67de50faa7f3ddce52fbab4f4656b04686a0bb15c26ebd0144d375cfa08b8ae
}
def _download(url: str, root: str):
    """Download `url` into `root`, verifying the SHA256 embedded in the URL.

    The expected checksum is the second-to-last path component of the URL
    (the OpenAI CDN layout). An existing file with a matching checksum is
    reused. Returns the local file path.

    Raises:
        RuntimeError: if the target exists but is not a regular file, or if
            the downloaded file fails checksum verification.
    """
    # Bare "import urllib" at module level does not guarantee the submodule
    # is loaded; import it explicitly here.
    import urllib.request

    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if _file_sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    # Fixed error message: the original read "does not not match".
    if _file_sha256(download_target) != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target


def _file_sha256(path: str) -> str:
    """Hex SHA256 of a file, read in chunks and with the handle closed (the
    original hashed via open(...).read() and leaked the file object)."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
    """CLIP preprocessing pipeline producing normalized n_px x n_px tensors."""
    # ImageNet-style normalization constants used by the released CLIP models.
    mean = (0.48145466, 0.4578275, 0.40821073)
    std = (0.26862954, 0.26130258, 0.27577711)
    steps = [
        Resize(n_px, interpolation=BICUBIC),
        CenterCrop(n_px),
        _convert_image_to_rgb,
        ToTensor(),
        Normalize(mean, std),
    ]
    return Compose(steps)
def available_models() -> List[str]:
    """Return the names of the CLIP checkpoints known to this module."""
    return [name for name in _MODELS]
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
    """Load a CLIP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict

    device : Union[str, torch.device]
        The device to put the loaded model

    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).

    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"

    Returns
    -------
    model : torch.nn.Module
        The CLIP model

    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Auto-download is disabled in this fork: a local checkpoint is required.
    if name in _MODELS:
        # model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
        raise NotImplementedError("Please pass a checkpoint file")
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")

    print("loading checkpoint from {}".format(model_path))
    # OpenCLIP-style checkpoints are plain state_dicts, detected by path name.
    if "OpenCLIP" in model_path or "open_clip" in model_path or "apple" in model_path or "laion" in model_path:
        # take care of the pre-trained weights from OpenCLIP https://github.com/mlfoundations/open_clip
        state_dict = torch.load(model_path, map_location="cpu")
    else:
        try:
            with open(model_path, 'rb') as opened_file:
                # loading JIT archive
                model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
                state_dict = None
        except RuntimeError:
            # loading saved state dict
            if jit:
                warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
                jit = False
            # state_dict = torch.load(opened_file, map_location="cpu")
            # NOTE(review): non-JIT checkpoints are expected to nest the
            # weights under a 'model' key — confirm against the training code.
            state_dict = torch.load(model_path, map_location="cpu")['model']

    if not jit:
        # Rebuild an eager model from the (JIT archive's or loaded) state_dict.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()  # fp16 weights are not usable on CPU
        return model, _transform(model.visual.input_resolution)

    # patch the device names
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        # Rewrite hard-coded "cuda" device constants in the traced graphs.
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            # Rewrite aten::to(..., fp16) casts in the traced graphs to fp32.
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:  # 5 == torch.float16 scalar type id
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
    """
    Convert input string(s) into padded tensors of CLIP token ids.

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    truncate: bool
        Whether to truncate the text in case its encoding is longer than the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
    A LongTensor is returned for torch < 1.8.0, since older index_select requires long indices.
    """
    if isinstance(texts, str):
        texts = [texts]
    bos = _tokenizer.encoder["<|startoftext|>"]
    eos = _tokenizer.encoder["<|endoftext|>"]
    token_rows = []
    for text in texts:
        token_rows.append([bos] + _tokenizer.encode(text) + [eos])
    # Older torch requires long indices for index_select.
    is_old_torch = packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0")
    dtype = torch.long if is_old_torch else torch.int
    result = torch.zeros(len(token_rows), context_length, dtype=dtype)
    for row_idx, row in enumerate(token_rows):
        if len(row) > context_length:
            if not truncate:
                raise RuntimeError(f"Input {texts[row_idx]} is too long for context length {context_length}")
            # Keep the first context_length tokens, forcing the final one to <eos>.
            row = row[:context_length]
            row[-1] = eos
        result[row_idx, :len(row)] = torch.tensor(row)
    return result
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/modules.py | strhub/models/modules.py | r"""Shared modules used by CRNN and TRBA"""
from torch import nn
class BidirectionalLSTM(nn.Module):
    """Two-direction LSTM followed by a linear projection.

    Ref: https://github.com/clovaai/deep-text-recognition-benchmark/blob/master/modules/sequence_modeling.py
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # batch_first keeps tensors as [batch, time, features] throughout
        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)
        # both directions are concatenated, hence 2 * hidden_size inputs
        self.linear = nn.Linear(hidden_size * 2, output_size)

    def forward(self, input):
        """Map visual features [batch, T, input_size] to contextual features [batch, T, output_size]."""
        features, _ = self.rnn(input)   # [batch, T, 2 * hidden_size]
        return self.linear(features)    # [batch, T, output_size]
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/post_process.py | strhub/models/post_process.py | # coding = utf-8
import torch
from strhub.clip import clip
@torch.no_grad()
def clip_post_process(clip_model, image, probs, charset_adapter, char_tokenizer,
                        K=10, K2=5, num_samples=50, prompt=None, alpha=0.3):
    """using CLIP to do post-refinement
    Args:
        clip_model: the clip model
        image: input image, by default, after transforms
        probs: [L, C], probability distribution
        K: the sampled captions
        K2: if K2 > 0, before sampling, choose the top-K2 probabilities
        charset_adapter: charset adapter
        char_tokenizer: tokenizer used to map class ids back to characters
        num_samples: number of multinomial draws used to propose candidates
        prompt: optional text prefix prepended to every candidate caption
        alpha: weight of the CLIP score when mixed with the joint probability
    Returns:
        (per-char probabilities, char indices, decoded string) of the winning candidate.
    """
    # greedy (max-probability) prediction and its joint probability
    max_p_y, max_ids = probs.max(-1)
    max_p_y_pord = max_p_y.prod(dim=0)
    p_y_prod, p_y, p_y_ind = top_k_candidate_v2(probs, K=K, num_samples=num_samples, K2=K2)
    # if the prediction with max probability was not sampled, manually add it
    if p_y_prod.max() < max_p_y_pord:
        try:
            p_y_prod = torch.cat([p_y_prod, max_p_y_pord.unsqueeze(0)], dim=0)
            p_y = torch.cat([p_y, max_p_y.unsqueeze(0)], dim=0)
            p_y_ind = torch.cat([p_y_ind, max_ids.unsqueeze(0)], dim=0)
        # NOTE(review): bare except silently keeps the un-augmented candidate set on a
        # shape mismatch; narrowing to RuntimeError and logging would be safer.
        except:
            print(p_y.shape, p_y_ind.shape)
    text, preds = [], []
    for ind in p_y_ind:
        ids = ind.tolist()
        # maybe a single id
        ids = ids if isinstance(ids, list) else [ids]
        tokens = char_tokenizer._ids2tok(ids, join=True)
        # charset_adapter is necessary
        string = charset_adapter(tokens)
        # add prompt or not
        if prompt is not None:
            text.append(prompt + " " + string)
        else:
            text.append(string)
        preds.append(string)
    if len(text) > 1:
        # score every candidate caption against the image with CLIP
        with torch.no_grad():
            text_input = clip.tokenize(text).to(image.device)
            logits_from_image, logits_from_text = clip_model(image, text_input)
            clip_prob = logits_from_image.softmax(dim=-1)
    else:
        # single candidate: CLIP re-ranking is a no-op
        clip_prob = p_y_prod.softmax(-1)
    # combine the probabilities
    final_prob = p_y_prod.softmax(-1) + alpha * clip_prob
    p, ind = final_prob.max(dim=-1)
    final_p_y, final_ind, final_pred = p_y[ind, ...], p_y_ind[ind, ...], preds[ind.item()]
    return final_p_y, final_ind, final_pred
def top_k_candidate(prob, K=10, num_samples=30):
"""
sample the top-k candidate from the probability distribution
prob: shape [length of the string, num_classes], probability distribution
K: the number of candidates
Return:
p_y_prod: [K], the joint probability
p_y: [K, L], all probabilities
p_y_ind: [K, L], correspoding index of p_y
"""
p_y, p_y_ind, p_y_prod = [], [], []
for i in range(num_samples):
# sample as multinomial distribution
ind = torch.multinomial(prob, 1)
sample = torch.gather(prob, 1, ind).squeeze(dim=-1)
prod = sample.prod(dim=0).item()
if prod in p_y_prod:
continue
else:
p_y.append(sample)
p_y_ind.append(ind.squeeze(dim=-1))
p_y_prod.append(prod)
p_y = torch.stack(p_y, dim=0)
p_y_ind = torch.stack(p_y_ind, dim=0)
p_y_prod = p_y.prod(dim=1)
k_prod, k_ind = torch.topk(p_y_prod, min(K, len(p_y_prod)), dim=0)
return p_y_prod[k_ind], p_y[k_ind, ...], p_y_ind[k_ind, ...]
def top_k_candidate_v2(prob, K=10, num_samples=30, K2=5):
    """
    Sample top-K candidate sequences, restricting the sampling to the K2 most
    likely characters at every position first.

    Args:
        prob: [L, C] probability distribution over characters
        K: number of candidates to return
        num_samples: number of multinomial draws
        K2: per-position shortlist size; disabled when <= 0 or >= C
    """
    # Degenerate shortlist -> plain sampling over the full charset.
    if K2 <= 0 or K2 >= prob.shape[-1]:
        return top_k_candidate(prob, K, num_samples)
    # [L, C] -> [L, K2]: keep only the K2 best characters per position
    shortlist_prob, shortlist_ind = torch.topk(prob, k=K2, dim=-1)
    joint_p, cand_p, cand_local_ind = top_k_candidate(shortlist_prob, K, num_samples)
    # map shortlist-local indices back to original charset indices
    charset_ind = torch.gather(shortlist_ind, 1, cand_local_ind.transpose(0, 1)).transpose(0, 1)
    return joint_p, cand_p, charset_ind
if __name__ == "__main__":
    # Manual smoke test for the sampling utilities (run this file directly).
    # sum_x = 0
    # for i in range(1000):
    #     a = torch.tensor([ [0.1, 0.2, 0.7],
    #                        [0.2, 0.7, 0.1] ], dtype=torch.float)
    #     x = torch.multinomial(a, 1)
    #     # print(x)
    #     sum_x += x
    # print(sum_x / 1000)
    probs = torch.tensor([ [0.1, 0.2, 0.7],
                           [0.2, 0.7, 0.1],
                           [0.7, 0.1, 0.2] ], dtype=torch.float)
    # probs = torch.tensor([ [0.1, 0.2, 0.7] ], dtype=torch.float)
    p, p_y, p_y_ind = top_k_candidate_v2(probs, K=4, num_samples=30, K2=2)
    print("input probability:\n", probs)
    print("joint probability:\n", p)
    print("probabilities of discrete random variable:\n", p_y)
    print("index:\n", p_y_ind)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/utils.py | strhub/models/utils.py | from pathlib import PurePath
from typing import Sequence
import torch
from torch import nn
import yaml
from torchvision.ops.misc import FrozenBatchNorm2d
class InvalidModelError(RuntimeError):
    """Raised for any model-related error: class resolution, configuration lookup, or weight loading."""
# Release-hosted pretrained checkpoints, keyed by experiment name (used by create_model).
_WEIGHTS_URL = {
    'parseq-tiny': 'https://github.com/baudm/parseq/releases/download/v1.0.0/parseq_tiny-e7a21b54.pt',
    'parseq': 'https://github.com/baudm/parseq/releases/download/v1.0.0/parseq-bb5792a6.pt',
    'abinet': 'https://github.com/baudm/parseq/releases/download/v1.0.0/abinet-1d1e373e.pt',
    'trba': 'https://github.com/baudm/parseq/releases/download/v1.0.0/trba-cfaed284.pt',
    'vitstr': 'https://github.com/baudm/parseq/releases/download/v1.0.0/vitstr-26d0fcf4.pt',
    'crnn': 'https://github.com/baudm/parseq/releases/download/v1.0.0/crnn-679d0e31.pt',
}
def _get_config(experiment: str, **kwargs):
    """Emulates hydra config resolution.

    Layers configs in order: main config, 94-character charset config, the base
    model config referenced by the experiment, the experiment's own overrides,
    and finally any **kwargs.

    Raises:
        FileNotFoundError: if the experiment (or referenced model) config is missing.
    """
    root = PurePath(__file__).parents[2]
    # NOTE(review): yaml.Loader can construct arbitrary Python objects; configs
    # are trusted repo files here, but SafeLoader would be safer if that changes.
    with open(root / 'configs/main.yaml', 'r') as f:
        config = yaml.load(f, yaml.Loader)['model']
    # fixed f-string without placeholders (was f'configs/charset/94_full.yaml')
    with open(root / 'configs/charset/94_full.yaml', 'r') as f:
        config.update(yaml.load(f, yaml.Loader)['model'])
    with open(root / f'configs/experiment/{experiment}.yaml', 'r') as f:
        exp = yaml.load(f, yaml.Loader)
    # Apply base model config
    model = exp['defaults'][0]['override /model']
    with open(root / f'configs/model/{model}.yaml', 'r') as f:
        config.update(yaml.load(f, yaml.Loader))
    # Apply experiment config
    if 'model' in exp:
        config.update(exp['model'])
    config.update(kwargs)
    return config
def _get_model_class(key):
    """Resolve a checkpoint path / model id to its system class and experiment name.

    Matching is by substring in a fixed priority order. The experiment name is
    used for config lookup; TRBA/TRBC fall back to the 'parseq' config.
    """
    exp = 'parseq'
    if 'abinet' in key:
        exp = 'abinet'
        from .abinet.system import ABINet as ModelClass
    elif 'crnn' in key:
        exp = 'crnn'
        from .crnn.system import CRNN as ModelClass
    elif 'parseq' in key:
        exp = 'parseq'
        from .parseq.system import PARSeq as ModelClass
    elif 'trba' in key:
        from .trba.system import TRBA as ModelClass
    elif 'trbc' in key:
        from .trba.system import TRBC as ModelClass
    elif 'vitstr' in key:
        exp = 'vitstr'
        from .vitstr.system import ViTSTR as ModelClass
    elif 'vl4str' in key or 'clip4str' in key:
        from .vl_str.system import VL4STR as ModelClass
        exp = 'vl4str'
        # size-specific experiment configs take precedence, checked in this order
        if 'large' in key:
            exp = 'vl4str-large'
        if 'base32x32' in key:
            exp = 'vl4str-base32'
        if 'huge' in key:
            exp = 'vl4str-huge'
    elif 'str_adapter' in key:
        exp = 'str_adapter'
        from .str_adapter.system import STRAdapter as ModelClass
    else:
        raise InvalidModelError("Unable to find model class for '{}'".format(key))
    return ModelClass, exp
def create_model(experiment: str, pretrained: bool = False, **kwargs):
    """Instantiate a model from its experiment name, optionally loading released weights.

    Args:
        experiment: experiment/config identifier (e.g. 'parseq', 'abinet').
        pretrained: when True, download and load the checkpoint from _WEIGHTS_URL.
        **kwargs: config overrides forwarded to _get_config.

    Raises:
        InvalidModelError: unknown experiment, or no pretrained weights available.
    """
    try:
        config = _get_config(experiment, **kwargs)
    except FileNotFoundError:
        raise InvalidModelError("No configuration found for '{}'".format(experiment)) from None
    # BUG FIX: _get_model_class returns (ModelClass, experiment_name); the class must
    # be unpacked before instantiation, otherwise a tuple would be called.
    ModelClass, _ = _get_model_class(experiment)
    model = ModelClass(**config)
    if pretrained:
        try:
            url = _WEIGHTS_URL[experiment]
        except KeyError:
            raise InvalidModelError("No pretrained weights found for '{}'".format(experiment)) from None
        checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location='cpu', check_hash=True)
        model.load_state_dict(checkpoint)
    return model
def load_from_checkpoint(checkpoint_path: str, **kwargs):
    """Load a model from a Lightning checkpoint, falling back to a plain state dict.

    First tries PyTorch-Lightning's load_from_checkpoint (which restores saved
    hyper-parameters); if that fails, rebuilds the model from the experiment
    config inferred from the path and loads the file as a raw state dict.
    """
    ModelClass, experiment = _get_model_class(checkpoint_path)
    try:
        model = ModelClass.load_from_checkpoint(checkpoint_path, **kwargs)
    # was a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit
    except Exception:
        # Not a Lightning checkpoint: rebuild from config and load raw weights.
        try:
            config = _get_config(experiment, **kwargs)
        except FileNotFoundError:
            raise InvalidModelError("No configuration found for '{}'".format(experiment)) from None
        model = ModelClass(**config)
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        model.load_state_dict(checkpoint)
    return model
def parse_model_args(args):
    """Parse 'name:type=value' CLI overrides into a kwargs dict.

    Supported types are int, float, str and bool (bool accepts a
    case-insensitive 'true'; anything else is False).
    """
    converters = {t.__name__: t for t in (int, float, str)}
    converters['bool'] = lambda v: v.lower() == 'true'  # special handling for bool
    parsed = {}
    for item in args:
        key_and_type, raw_value = item.split('=', maxsplit=1)
        key, type_name = key_and_type.split(':', maxsplit=1)
        parsed[key] = converters[type_name](raw_value)
    return parsed
def init_weights(module: nn.Module, name: str = '', exclude: Sequence[str] = ()):
    """Initialize weights with the schemes typically used by SOTA models.

    Args:
        module: module to (re-)initialize in place.
        name: dotted module name, used for exclusion matching.
        exclude: name prefixes to leave untouched.
    """
    # Skip modules whose dotted name starts with any excluded prefix.
    for prefix in exclude:
        if name.startswith(prefix):
            return
    if isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
        # norm layers start as the identity transform
        nn.init.ones_(module.weight)
        nn.init.zeros_(module.bias)
        return
    if isinstance(module, nn.Conv2d):
        nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        if module.bias is not None:
            nn.init.zeros_(module.bias)
        return
    if isinstance(module, nn.Linear):
        nn.init.trunc_normal_(module.weight, std=.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
        return
    if isinstance(module, nn.Embedding):
        nn.init.trunc_normal_(module.weight, std=.02)
        if module.padding_idx is not None:
            # keep the padding row at zero so it contributes nothing
            module.weight.data[module.padding_idx].zero_()
def freeze_batch_norm_2d(module, module_match={}, name=""):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.
    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty)
        name (str): Full module name (prefix)
    Returns:
        torch.nn.Module: Resulting module
    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    # NOTE(review): the mutable default `module_match={}` is shared across calls; it is
    # only read here, so this is currently safe, but a None default would be cleaner.
    res = module
    is_match = True
    if module_match:
        is_match = name in module_match
    if is_match and isinstance(
        module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
    ):
        # copy the learned statistics and affine parameters into the frozen replacement
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        # recurse into children, replacing any that were converted to a new object
        for child_name, child in module.named_children():
            full_child_name = ".".join([name, child_name]) if name else child_name
            new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
            if new_child is not child:
                res.add_module(child_name, new_child)
    return res
if __name__ == "__main__":
    import numpy as np
    # Scratch data for a (currently disabled) beam-search experiment.
    # define a sequence of 10 words over a vocab of 5 words
    data = [[0.1, 0.2, 0.3, 0.4, 0.5],
            [0.5, 0.4, 0.3, 0.2, 0.1],
            [0.1, 0.2, 0.3, 0.4, 0.5],
            [0.5, 0.4, 0.3, 0.2, 0.1],
            [0.1, 0.2, 0.3, 0.4, 0.5],
            [0.5, 0.4, 0.3, 0.2, 0.1],
            [0.1, 0.2, 0.3, 0.4, 0.5],
            [0.5, 0.4, 0.3, 0.2, 0.1],
            [0.1, 0.2, 0.3, 0.4, 0.5],
            [0.5, 0.4, 0.3, 0.2, 0.1]]
    data = np.array(data)
    # # decode sequence
    # result = beam_search_decoder(data, 3)
    # # print result
    # for seq in result:
    #     print(seq)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/__init__.py | strhub/models/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/base.py | strhub/models/base.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, Tuple, List
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from nltk import edit_distance
from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT
from timm.optim import create_optimizer_v2
from torch import Tensor
from torch.optim import Optimizer
from torch.optim.lr_scheduler import OneCycleLR
from strhub.data.utils import CharsetAdapter, CTCTokenizer, Tokenizer, BaseTokenizer
from strhub.models.post_process import clip_post_process
from strhub.clip import clip
@dataclass
class BatchResult:
    """Aggregated metrics for one evaluation batch (produced by BaseSystem._eval_step)."""
    num_samples: int  # number of images in the batch
    correct: int  # exact-match count
    ned: float  # summed normalized edit distance (ICDAR 2019 definition)
    confidence: float  # summed product-of-probabilities confidence
    label_length: int  # summed prediction lengths
    loss: Tensor  # mean loss (None at test time)
    loss_numel: int  # number of elements the loss was computed from (None at test time)
class BaseSystem(pl.LightningModule, ABC):
    """Shared Lightning scaffolding for all STR systems.

    Subclasses implement forward() and forward_logits_loss(); this class
    provides optimizer/scheduler setup, validation/test evaluation, metric
    aggregation and the optional CLIP-based post-refinement at test time.
    """
    def __init__(self, tokenizer: BaseTokenizer, charset_test: str,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float) -> None:
        super().__init__()
        self.tokenizer = tokenizer
        # maps raw predictions into the (possibly smaller) test-time charset
        self.charset_adapter = CharsetAdapter(charset_test)
        self.batch_size = batch_size
        self.lr = lr
        self.warmup_pct = warmup_pct
        self.weight_decay = weight_decay
    @abstractmethod
    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        """Inference
        Args:
            images: Batch of images. Shape: N, Ch, H, W
            max_length: Max sequence length of the output. If None, will use default.
        Returns:
            logits: N, L, C (L = sequence length, C = number of classes, typically len(charset_train) + num specials)
        """
        raise NotImplementedError
    @abstractmethod
    def forward_logits_loss(self, images: Tensor, labels: List[str]) -> Tuple[Tensor, Tensor, int]:
        """Like forward(), but also computes the loss (calls forward() internally).
        Args:
            images: Batch of images. Shape: N, Ch, H, W
            labels: Text labels of the images
        Returns:
            logits: N, L, C (L = sequence length, C = number of classes, typically len(charset_train) + num specials)
            loss: mean loss for the batch
            loss_numel: number of elements the loss was calculated from
        """
        raise NotImplementedError
    def configure_optimizers(self):
        agb = self.trainer.accumulate_grad_batches
        # Linear scaling so that the effective learning rate is constant regardless of the number of GPUs used with DDP.
        lr_scale = agb * math.sqrt(self.trainer.num_devices) * self.batch_size / 256.
        lr = lr_scale * self.lr
        optim = create_optimizer_v2(self, 'adamw', lr, self.weight_decay)
        sched = OneCycleLR(optim, lr, self.trainer.estimated_stepping_batches, pct_start=self.warmup_pct,
                           cycle_momentum=False)
        return {'optimizer': optim, 'lr_scheduler': {'scheduler': sched, 'interval': 'step'}}
    def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
        # set_to_none=True frees gradient memory instead of zero-filling it
        optimizer.zero_grad(set_to_none=True)
    def _eval_step(self, batch, validation: bool, clip_refine: bool = False, **kwargs) -> Optional[STEP_OUTPUT]:
        """Shared evaluation body for validation_step/test_step.

        Returns a dict with a BatchResult under 'output'. When `clip_refine` is
        True, candidate strings are re-ranked per image with `self.clip_model`
        (created lazily in test_step).
        """
        images, labels = batch
        correct = 0
        total = 0
        ned = 0
        confidence = 0
        label_length = 0
        if validation:
            logits, loss, loss_numel = self.forward_logits_loss(images, labels)
        else:
            # At test-time, we shouldn't specify a max_label_length because the test-time charset used
            # might be different from the train-time charset. max_label_length in eval_logits_loss() is computed
            # based on the transformed label, which could be wrong if the actual gt label contains characters existing
            # in the train-time charset but not in the test-time charset. For example, "aishahaleyes.blogspot.com"
            # is exactly 25 characters, but if processed by CharsetAdapter for the 36-char set, it becomes 23 characters
            # long only, which sets max_label_length = 23. This will cause the model prediction to be truncated.
            logits = self.forward(images)
            loss = loss_numel = None  # Only used for validation; not needed at test-time.
        probs = logits.softmax(-1)
        if clip_refine:
            # use clip to refine the final results, one image at a time
            batch_probs = self.tokenizer.decode_for_refine(probs)
            preds, probs = [], []
            for i, prob in enumerate(batch_probs):
                post_prob, post_ids, post_pred = \
                    clip_post_process(self.clip_model,
                                      images[i, ...].unsqueeze(0),
                                      prob, self.charset_adapter, self.tokenizer,
                                      K=5 if not "sample_K" in kwargs else kwargs["sample_K"],
                                      K2=0 if not "sample_K2" in kwargs else kwargs["sample_K2"],
                                      num_samples=50 if not "sample_total" in kwargs else kwargs["sample_total"],
                                      prompt=None if not "sample_prompt" in kwargs else kwargs["sample_prompt"],
                                      alpha=0.1 if not "alpha" in kwargs else kwargs["alpha"]
                                      )
                preds.append(post_pred)
                probs.append(post_prob)
        else:
            preds, probs = self.tokenizer.decode(probs)
        for pred, prob, gt in zip(preds, probs, labels):
            confidence += prob.prod().item()
            # adapt for the test charset (clip_post_process already applied the adapter)
            pred = self.charset_adapter(pred) if not clip_refine else pred
            # Follow ICDAR 2019 definition of N.E.D.
            ned += edit_distance(pred, gt) / max(len(pred), len(gt))
            if pred == gt:
                correct += 1
            # else:
            #     # check for the wrong case
            #     print(total, pred, gt)
            total += 1
            label_length += len(pred)
        return dict(output=BatchResult(total, correct, ned, confidence, label_length, loss, loss_numel))
    @staticmethod
    def _aggregate_results(outputs: EPOCH_OUTPUT) -> Tuple[float, float, float]:
        # Reduce per-batch BatchResults into (accuracy, 1 - mean NED, mean loss).
        if not outputs:
            return 0., 0., 0.
        total_loss = 0
        total_loss_numel = 0
        total_n_correct = 0
        total_norm_ED = 0
        total_size = 0
        for result in outputs:
            result = result['output']
            # loss is weighted by the number of elements it was computed from
            total_loss += result.loss_numel * result.loss
            total_loss_numel += result.loss_numel
            total_n_correct += result.correct
            total_norm_ED += result.ned
            total_size += result.num_samples
        acc = total_n_correct / total_size
        ned = (1 - total_norm_ED / total_size)
        loss = total_loss / total_loss_numel
        return acc, ned, loss
    def validation_step(self, batch, batch_idx) -> Optional[STEP_OUTPUT]:
        return self._eval_step(batch, True, clip_refine=False)
    def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
        acc, ned, loss = self._aggregate_results(outputs)
        self.log('val_accuracy', 100 * acc, sync_dist=True)
        self.log('val_NED', 100 * ned, sync_dist=True)
        self.log('val_loss', loss, sync_dist=True)
        self.log('hp_metric', acc, sync_dist=True)
    def test_step(self, batch, batch_idx, clip_refine: bool = False, **kwargs) -> Optional[STEP_OUTPUT]:
        # lazily instantiate the CLIP re-ranking model on first use
        if clip_refine and self.clip_model is None:
            print('[CLIP] creating CLIP model for post refinement')
            model, clip_transform = clip.load(kwargs["clip_model_path"], device=batch[0].device)
            self.clip_model = model.float()
        return self._eval_step(batch, False, clip_refine=clip_refine, **kwargs)
class CrossEntropySystem(BaseSystem):
    """Base class for systems trained with token-level cross-entropy."""

    def __init__(self, charset_train: str, charset_test: str,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float) -> None:
        tokenizer = Tokenizer(charset_train)
        super().__init__(tokenizer, charset_test, batch_size, lr, warmup_pct, weight_decay)
        # cache special token ids for loss masking and decoding
        self.bos_id = tokenizer.bos_id
        self.eos_id = tokenizer.eos_id
        self.pad_id = tokenizer.pad_id
        # created lazily by BaseSystem.test_step when CLIP refinement is requested
        self.clip_model = None

    def forward_logits_loss(self, images: Tensor, labels: List[str]) -> Tuple[Tensor, Tensor, int]:
        """Run the model and compute padded cross-entropy over the target tokens."""
        targets = self.tokenizer.encode(labels, self.device)[:, 1:]  # Discard <bos>
        max_len = targets.shape[1] - 1  # exclude <eos> from count
        logits = self.forward(images, max_len)
        flat_logits = logits.flatten(end_dim=1)
        flat_targets = targets.flatten()
        loss = F.cross_entropy(flat_logits, flat_targets, ignore_index=self.pad_id)
        loss_numel = (targets != self.pad_id).sum()
        return logits, loss, loss_numel
class CTCSystem(BaseSystem):
    """Base class for alignment-free systems trained with the CTC loss."""

    def __init__(self, charset_train: str, charset_test: str,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float) -> None:
        tokenizer = CTCTokenizer(charset_train)
        super().__init__(tokenizer, charset_test, batch_size, lr, warmup_pct, weight_decay)
        # id of the CTC blank symbol, needed by the loss
        self.blank_id = tokenizer.blank_id

    def forward_logits_loss(self, images: Tensor, labels: List[str]) -> Tuple[Tensor, Tensor, int]:
        """Run the model and compute the CTC loss over full-length alignments."""
        targets = self.tokenizer.encode(labels, self.device)
        logits = self.forward(images)
        log_probs = logits.log_softmax(-1).transpose(0, 1)  # swap batch and seq. dims
        seq_len, batch, _ = log_probs.shape
        # every alignment spans the full output sequence
        input_lengths = torch.full(size=(batch,), fill_value=seq_len, dtype=torch.long, device=self.device)
        target_lengths = torch.as_tensor([len(label) for label in labels], dtype=torch.long, device=self.device)
        loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                          blank=self.blank_id, zero_infinity=True)
        return logits, loss, batch
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/perm_test.py | strhub/models/vl_str/perm_test.py | # coding=utf-8
import torch
def generate_attn_masks(perm):
    """Generate content/query attention masks for one decoding permutation.

    :param perm: the permutation sequence (includes pos. for bos and eos tokens); i = 0 is always the BOS
    :return: (content_mask, query_mask) lookahead masks with -inf at blocked keys
    """
    neg_inf = float('-inf')
    length = perm.shape[0]
    mask = torch.zeros((length, length))
    # each position may only attend to positions that precede it in the permutation
    for step, query_idx in enumerate(perm):
        mask[query_idx, perm[step + 1:]] = neg_inf
    content_mask = mask[:-1, :-1].clone()
    # queries additionally must not attend to themselves
    mask[torch.eye(length, dtype=torch.bool)] = neg_inf
    query_mask = mask[1:, :-1]
    return content_mask, query_mask
# perms = torch.tensor([[0, 1, 2, 3, 4, 5, 6],
# [0, 6, 5, 4, 3, 2, 1],
# [0, 3, 1, 5, 2, 4, 6],
# [0, 1, 4, 2, 3, 5, 6],
# [0, 2, 3, 5, 1, 4, 6],
# [0, 2, 1, 3, 5, 4, 6]])
# for perm in perms:
# sz = perm.shape[0]
# mask = torch.zeros((sz, sz))
# for i in range(sz):
# q_idx = perm[i]
# masked_keys = perm[i + 1:]
# mask[q_idx, masked_keys] = float('-inf')
# print(mask)
# Reference: the plain left-to-right (causal) mask for a 7-token sequence.
L= 7
mask = torch.triu(torch.full((L, L), float('-inf')), 1)
content_mask = mask[:-1, :-1].clone() | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/system.py | strhub/models/vl_str/system.py | # coding=utf-8
import os
import math
import numpy as np
from itertools import permutations
from typing import Sequence, Any, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.optim.lr_scheduler import OneCycleLR
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.utilities.types import STEP_OUTPUT
from strhub.clip import clip
from strhub.models.base import CrossEntropySystem
from .modules import DecoderLayer, Decoder, modify_attn_mask
# an alternative choice when the input argument is not valid
# CLIP_PATH = '/PUT/YOUR/PATH/HERE/pretrained/clip'
CLIP_PATH = '/home/shuai/pretrained/clip'
if not os.path.exists(CLIP_PATH):
    CLIP_PATH = '/home/shuzhao/Data/pretrained/clip'
# NOTE(review): assert is stripped under `python -O`; an explicit check raising a
# clear error would be more robust for this hard-coded fallback path.
assert os.path.exists(CLIP_PATH)
class VL4STR(CrossEntropySystem):
    def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
                 img_size: Sequence[int], patch_size: Sequence[int], embed_dim: int,
                 enc_num_heads: int, enc_mlp_ratio: int, enc_depth: int,
                 dec_num_heads: int, dec_mlp_ratio: int, dec_depth: int,
                 perm_num: int, perm_forward: bool, perm_mirrored: bool,
                 decode_ar: bool, refine_iters: int, dropout: float, **kwargs: Any) -> None:
        """Build the CLIP-backed STR system: CLIP encoder + visual and (optional) cross-modal decoders.

        Most optional behavior is driven by **kwargs; the effective configuration
        is dumped via rank_zero_info below.
        """
        super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
        self.save_hyperparameters()
        self.max_label_length = max_label_length
        self.decode_ar = decode_ar
        self.refine_iters = refine_iters
        # optional kwargs with their defaults
        self.coef_lr = kwargs["coef_lr"] if "coef_lr" in kwargs.keys() else 1.0
        self.coef_wd = kwargs["coef_wd"] if "coef_wd" in kwargs.keys() else 1.0
        self.image_freeze_nlayer = kwargs["image_freeze_nlayer"] if "image_freeze_nlayer" in kwargs.keys() else -1
        self.text_freeze_nlayer = kwargs["text_freeze_nlayer"] if "text_freeze_nlayer" in kwargs.keys() else -1
        # freezing 12+ layers is treated as freezing the whole backbone
        self.freeze_language_backbone = self.text_freeze_nlayer >= 12
        self.freeze_image_backbone = self.image_freeze_nlayer >= 12
        self.use_language_model = kwargs["use_language_model"] if "use_language_model" in kwargs.keys() else False
        self.context_length = kwargs["context_length"] if "context_length" in kwargs.keys() else 20
        self.cross_loss_w = kwargs["cross_loss_w"] if "cross_loss_w" in kwargs.keys() else 1.0
        self.use_share_dim = kwargs["use_share_dim"] if "use_share_dim" in kwargs.keys() else True
        self.cross_gt_context = kwargs["cross_gt_context"] if "cross_gt_context" in kwargs.keys() else True
        self.cross_cloze_mask = kwargs["cross_cloze_mask"] if "cross_cloze_mask" in kwargs.keys() else False
        self.cross_correct_once = kwargs["cross_correct_once"] if "cross_correct_once" in kwargs.keys() else False
        self.image_detach = kwargs["image_detach"] if "image_detach" in kwargs.keys() else True
        self.cross_token_embeding = kwargs["cross_token_embeding"] if "cross_token_embeding" in kwargs.keys() else False
        self.cross_fast_decode = False
        self.clip_cls_eot_feature = kwargs["clip_cls_eot_feature"] if "clip_cls_eot_feature" in kwargs.keys() else False
        rank_zero_info("\n config of VL4STR: \n"
                "\t image_freeze_nlayer: {}, text_freeze_nlayer: {}, freeze_language_backbone: {}, freeze_image_backbone: {} \n"
                "\t use_language_model: {}, context_length: {}, cross_token_embeding: {}, cross_loss_weight: {} \n"
                "\t use_share_dim: {}, image_detach: {}, clip_cls_eot_feature: {} \n"
                "\t cross_gt_context: {}, cross_cloze_mask: {}, cross_fast_decode: {} \n".format(
                self.image_freeze_nlayer, self.text_freeze_nlayer, self.freeze_language_backbone, self.freeze_image_backbone,
                self.use_language_model, self.context_length, self.cross_token_embeding, self.cross_loss_w,
                self.use_share_dim, self.image_detach, self.clip_cls_eot_feature,
                self.cross_gt_context, self.cross_cloze_mask, self.cross_fast_decode)
            )
        assert "clip_pretrained" in kwargs.keys()
        # fall back to the hard-coded CLIP_PATH when the given checkpoint path is absent
        if not os.path.exists(kwargs["clip_pretrained"]):
            kwargs["clip_pretrained"] = os.path.join(CLIP_PATH, os.path.basename(kwargs["clip_pretrained"]))
        print(">>> Try to load CLIP model from {}".format(kwargs["clip_pretrained"]))
        assert os.path.exists(kwargs["clip_pretrained"])
        # load CLIP model
        clip_model, _ = clip.load(name=kwargs["clip_pretrained"], device='cpu')
        self.clip_model = clip_model.float()
        # modify the attention mask according to context length
        self.clip_model.transformer.apply(lambda m: modify_attn_mask(m, context_length=self.context_length))
        self.freeze_cip_layers(self.image_freeze_nlayer, self.text_freeze_nlayer)
        # visual decoder
        vis_embed_dim = self.clip_model.text_projection.shape[-1] if self.use_share_dim else self.clip_model.visual.proj.shape[0]
        rank_zero_info("The dimension of the visual decoder is {}.".format(vis_embed_dim))
        decoder_layer = DecoderLayer(vis_embed_dim, dec_num_heads, vis_embed_dim * dec_mlp_ratio, dropout)
        # We don't predict <bos> nor <pad>, so num_classes=len(self.tokenizer) - 2, label length + 1 for <eos>
        self.visual_decoder = Decoder(decoder_layer, dec_depth, norm=nn.LayerNorm(vis_embed_dim),
                                      embed_dim=vis_embed_dim,
                                      dropout=dropout,
                                      num_classes=len(self.tokenizer) - 2,
                                      charset_size=len(self.tokenizer),
                                      max_label_length=max_label_length + 1)
        # cross-modal decoder
        if self.use_language_model:
            cross_embed_dim = self.clip_model.text_projection.shape[-1]
            decoder_layer = DecoderLayer(cross_embed_dim, dec_num_heads, cross_embed_dim * dec_mlp_ratio, dropout)
            self.cross_decoder = Decoder(decoder_layer, dec_depth, norm=nn.LayerNorm(cross_embed_dim),
                                         embed_dim=cross_embed_dim,
                                         dropout=dropout,
                                         num_classes=len(self.tokenizer) - 2,
                                         charset_size=len(self.tokenizer),
                                         max_label_length=max_label_length + 1)
        # Perm/attn mask stuff
        self.rng = np.random.default_rng()
        self.max_gen_perms = perm_num // 2 if perm_mirrored else perm_num
        self.perm_forward = perm_forward
        self.perm_mirrored = perm_mirrored
def encode(self, img: torch.Tensor):
"""extract CLIP image features"""
if self.freeze_image_backbone:
with torch.no_grad():
memory = self.clip_model.encode_image(img, cls=self.clip_cls_eot_feature, projection=self.use_share_dim)
else:
memory = self.clip_model.encode_image(img, cls=self.clip_cls_eot_feature, projection=self.use_share_dim)
return memory if not self.clip_cls_eot_feature else torch.unsqueeze(memory, dim=1)
def visual_decode(self, tgt: torch.Tensor, memory: torch.Tensor,
tgt_query: Optional[Tensor] = None, tgt_query_mask: Optional[Tensor] = None,
content_mask: Optional[Tensor] = None, tgt_padding_mask: Optional[Tensor] = None, ):
return self.visual_decoder(tgt, memory, tgt_query, tgt_query_mask, content_mask, tgt_padding_mask)
def encoder_cross_modal_feature(self, prev_logits, image_feat):
    """Build the fused (image + text) memory for the cross-modal decoder.

    Greedily decodes the previous logits into caption strings, re-encodes
    them with the CLIP text encoder, and concatenates text features to the
    image features along the sequence dimension.

    :param prev_logits: logits from the visual branch (used only as context).
    :param image_feat: CLIP image features.
    :return: concatenated [image_features, text_features] along dim=1.
    """
    # never backprop through the context predictions
    prev_logits = prev_logits.detach().clone()
    # optionally stop gradients from the cross branch into the image branch
    image_features = image_feat.detach().clone() if self.image_detach else image_feat
    if not self.use_share_dim:
        # project image features into CLIP's shared embedding space
        image_features = torch.matmul(image_features, self.clip_model.visual.proj)
    # get previous predictions
    probs = prev_logits.softmax(-1)
    # adapt for the test charset, CLIP is not sensitive to uppercase or symbols
    captions, _ = self.tokenizer.decode_fast(probs, charset_adapter=self.charset_adapter)
    text = clip.tokenize(captions, context_length=self.context_length, truncate=True).to(image_feat.device)
    # return all text features (token embeddings or full text-encoder output)
    if self.freeze_language_backbone:
        with torch.no_grad():
            text_features = self.clip_model.token_embedding(text) if self.cross_token_embeding else \
                self.clip_model.encode_text(text, eot=self.clip_cls_eot_feature)
    else:
        text_features = self.clip_model.token_embedding(text) if self.cross_token_embeding else \
            self.clip_model.encode_text(text, eot=self.clip_cls_eot_feature)
    if self.clip_cls_eot_feature:
        # pooled text vector -> add a length-1 sequence dimension
        text_features = torch.unsqueeze(text_features, dim=1)
    return torch.cat([image_features, text_features], dim=1)
def cross_decode(self, prev_logits, tgt: torch.Tensor, memory: torch.Tensor,
                 tgt_query: Optional[Tensor] = None, tgt_query_mask: Optional[Tensor] = None,
                 content_mask: Optional[Tensor] = None, tgt_padding_mask: Optional[Tensor] = None,
                 cross_memory = None):
    """Run the cross-modal decoder, building the fused memory on demand.

    If cross_memory is not provided it is derived from prev_logits and the
    image features via encoder_cross_modal_feature.
    """
    fused_memory = (self.encoder_cross_modal_feature(prev_logits, memory)
                    if cross_memory is None else cross_memory)
    return self.cross_decoder(tgt, fused_memory, tgt_query, tgt_query_mask, content_mask, tgt_padding_mask)
def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
    """Decode character logits for a batch of images.

    :param images: input image batch (N, C, H, W).
    :param max_length: optional cap on label length; None means test mode
        (use self.max_label_length and stop early once every sample has <eos>).
    :return: character logits (N, T, num_classes).
    """
    testing = max_length is None
    max_length = self.max_label_length if max_length is None else min(max_length, self.max_label_length)
    bs = images.shape[0]
    # +1 for <eos> at end of sequence.
    num_steps = max_length + 1
    memory = self.encode(images)
    # Query positions up to `num_steps`
    vis_pos_queries = self.visual_decoder.pos_queries[:, :num_steps].expand(bs, -1, -1)
    crs_pos_queries = self.cross_decoder.pos_queries[:, :num_steps].expand(bs, -1, -1) if self.use_language_model else None
    # a left-to-right auto-regressive mask, special case for the forward permutation
    content_mask = query_mask = torch.triu(torch.full((num_steps, num_steps), float('-inf'), device=self._device), 1)
    bos = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
    if self.decode_ar:
        tgt_in = torch.full((bs, num_steps), self.pad_id, dtype=torch.long, device=self._device)
        tgt_in[:, 0] = self.bos_id
        logits = []
        all_visual_vec = []
        for i in range(num_steps):
            j = i + 1  # next token index
            # Efficient decoding: Input the context up to the ith token. We use only one query (at position = i) at a time.
            # This works because of the lookahead masking effect of the canonical (forward) AR context.
            # Past tokens have no access to future tokens, hence are fixed once computed.
            p_i, visual_vec = self.visual_decode(tgt_in[:, :j], memory, tgt_query=vis_pos_queries[:, i:j],
                                                 tgt_query_mask=query_mask[i:j, :j], content_mask=content_mask[:j, :j],)
            # the next token probability is in the output's ith token position
            logits.append(p_i)
            all_visual_vec.append(visual_vec.clone())
            if j < num_steps:
                # greedy decode. add the next token index to the target input
                tgt_in[:, j] = p_i.squeeze().argmax(-1)
                # early exit once every sample in the batch has produced <eos>
                if testing and (tgt_in == self.eos_id).any(dim=-1).all():
                    break
        logits = torch.cat(logits, dim=1)
        visual_vec = torch.cat(all_visual_vec, dim=1)
    else:
        # No prior context, so input is just <bos>. We query all positions.
        logits, visual_vec = self.visual_decode(bos, memory, tgt_query=vis_pos_queries)
    if self.use_language_model:
        crs_num_steps = logits.shape[1]
        if self.cross_fast_decode:
            # just use visual output as input context (single non-autoregressive pass)
            cross_logits, cross_vec = self.cross_decode(logits, tgt_in[:, :crs_num_steps], memory, tgt_query=crs_pos_queries[:, :crs_num_steps],
                                                        tgt_query_mask=query_mask[:crs_num_steps, :crs_num_steps],
                                                        content_mask=content_mask[:crs_num_steps, :crs_num_steps],)
        else:
            # prediction of cross-modal branch as input context; fused memory built once
            cross_memory = self.encoder_cross_modal_feature(logits, memory)
            cross_logits = []
            all_cross_vec = []
            for i in range(crs_num_steps):
                j = i + 1  # next token index
                p_i, cross_vec = self.cross_decode(logits, tgt_in[:, :j], memory, tgt_query=crs_pos_queries[:, i:j],
                                                   tgt_query_mask=query_mask[i:j, :j], content_mask=content_mask[:j, :j], cross_memory=cross_memory)
                cross_logits.append(p_i)
                all_cross_vec.append(cross_vec.clone())
                if j < crs_num_steps:
                    tgt_in[:, j] = p_i.squeeze().argmax(-1)
            cross_logits = torch.cat(cross_logits, dim=1)
            cross_vec = torch.cat(all_cross_vec, dim=1)
    if self.refine_iters:
        # For iterative refinement, we always use a 'cloze' mask.
        # We can derive it from the AR forward mask by unmasking the token context to the right.
        query_mask[torch.triu(torch.ones(num_steps, num_steps, dtype=torch.bool, device=self._device), 2)] = 0
        bos = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
        for i in range(self.refine_iters):
            # Prior context is the previous output.
            tgt_in = torch.cat([bos, logits[:, :-1].argmax(-1)], dim=1)
            tgt_padding_mask = ((tgt_in == self.eos_id).cumsum(-1) > 0)  # mask tokens beyond the first EOS token.
            logits, visual_vec = self.visual_decode(tgt_in, memory,
                                                    tgt_query=vis_pos_queries, tgt_query_mask=query_mask[:, :tgt_in.shape[1]],
                                                    content_mask=content_mask, tgt_padding_mask=tgt_padding_mask,)
            if self.use_language_model:
                tgt_in = torch.cat([bos, cross_logits[:, :-1].argmax(-1)], dim=1)
                tgt_padding_mask = ((tgt_in == self.eos_id).cumsum(-1) > 0)
                cross_logits, cross_vec = self.cross_decode(logits, tgt_in, memory,
                                                            tgt_query=crs_pos_queries, tgt_query_mask=query_mask[:, :tgt_in.shape[1]],
                                                            content_mask=content_mask, tgt_padding_mask=tgt_padding_mask,)
    # TODO: how to fuse the final predictions
    # NOTE(review): the line below assumes the cross-modal branch ran
    # (use_language_model=True); with use_language_model=False, cross_logits
    # would be undefined here — confirm intended configuration.
    logits = cross_logits
    return logits
def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
    """One training step: permuted-sequence CE loss over visual (and optionally
    cross-modal) branches, normalized by the total number of scored tokens.
    """
    images, labels = batch
    tgt = self.tokenizer.encode(labels, self._device)
    # Encode the source sequence (i.e. the image codes)
    memory = self.encode(images)
    # Prepare the target sequences (input and output)
    tgt_perms = self.gen_tgt_perms(tgt)
    tgt_in = tgt[:, :-1]  # remove [EOS] token
    tgt_out = tgt[:, 1:]  # remove [BOS] token
    # The [EOS] token is not depended upon by any other token in any permutation ordering
    tgt_padding_mask = (tgt_in == self.pad_id) | (tgt_in == self.eos_id)
    bs = images.shape[0]
    bos = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
    # try to use a cloze_mask for cross decoding
    L = tgt_in.shape[1]
    cloze_content_mask = cloze_query_mask = torch.triu(torch.full((L, L), float('-inf'), device=self._device), 1)
    cloze_query_mask[torch.triu(torch.ones(L, L, dtype=torch.bool, device=self._device), 2)] = 0
    loss = 0
    loss_numel = 0
    all_inter_vars = []
    n = (tgt_out != self.pad_id).sum().item()  # number of scored (non-pad) tokens
    for i, perm in enumerate(tgt_perms):
        content_mask, query_mask = self.generate_attn_masks(perm)
        visual_logits, visual_vec = self.visual_decode(tgt_in, memory, tgt_query_mask=query_mask,
                                                       content_mask=content_mask, tgt_padding_mask=tgt_padding_mask, )
        # token-count-weighted CE so the final division gives a per-token mean
        loss += n * F.cross_entropy(visual_logits.flatten(end_dim=1), tgt_out.flatten(), ignore_index=self.pad_id)
        all_inter_vars.append((visual_logits.detach().clone(), query_mask, content_mask))
        # cross forward
        if self.use_language_model:
            # 1. use the prediction of visual branch as context, keep the shape of cross_tgt_in the same as `tgt_in`
            # 2. use the GT as the context
            cross_tgt_in = tgt_in if self.cross_gt_context else torch.cat([bos, visual_logits[:, :-1].argmax(-1)], dim=1)
            cross_query_mask = cloze_query_mask if self.cross_cloze_mask else query_mask
            cross_content_mask = cloze_content_mask if self.cross_cloze_mask else content_mask
            cross_logits, cross_vec = self.cross_decode(visual_logits, cross_tgt_in, memory, tgt_query_mask=cross_query_mask,
                                                        content_mask=cross_content_mask, tgt_padding_mask=tgt_padding_mask, )
            loss += self.cross_loss_w * n * F.cross_entropy(cross_logits.flatten(end_dim=1), tgt_out.flatten(), ignore_index=self.pad_id)
        loss_numel += n
        # After the second iteration (i.e. done with canonical and reverse orderings),
        # remove the [EOS] tokens for the succeeding perms
        if i == 1:
            tgt_out = torch.where(tgt_out == self.eos_id, self.pad_id, tgt_out)
            n = (tgt_out != self.pad_id).sum().item()
    loss /= loss_numel
    self.log('loss', loss)
    return loss
def gen_tgt_perms(self, tgt):
    """Generate shared permutations for the whole batch.

    This works because the same attention mask can be used for the shorter sequences
    because of the padding mask.

    An example of perms with string length 5
    >>> tensor([[0, 1, 2, 3, 4, 5, 6],  # canonical order
                [0, 6, 5, 4, 3, 2, 1],  # reverse order
                [0, 3, 1, 5, 2, 4, 6],
                [0, 1, 4, 2, 3, 5, 6],
                [0, 2, 3, 5, 1, 4, 6],
                [0, 2, 1, 3, 5, 4, 6]])

    :param tgt: encoded targets of shape (N, max_len+2) including [BOS]/[EOS].
    :return: tensor of permutations, each row covering positions 0..max_len+1.
    """
    # We don't permute the position of BOS, we permute EOS separately
    max_num_chars = tgt.shape[1] - 2
    # Special handling for 1-character sequences: only the identity perm exists
    if max_num_chars == 1:
        return torch.arange(3, device=self._device).unsqueeze(0)
    perms = [torch.arange(max_num_chars, device=self._device)] if self.perm_forward else []
    # Additional permutations if needed
    max_perms = math.factorial(max_num_chars)
    if self.perm_mirrored:
        max_perms //= 2
    num_gen_perms = min(self.max_gen_perms, max_perms)
    # For 4-char sequences and shorter, we generate all permutations and sample from the pool to avoid collisions
    # Note that this code path might NEVER get executed since the labels in a mini-batch typically exceed 4 chars.
    if max_num_chars < 5:
        # Pool of permutations to sample from. We only need the first half (if complementary option is selected)
        # Special handling for max_num_chars == 4 which correctly divides the pool into the flipped halves
        if max_num_chars == 4 and self.perm_mirrored:
            selector = [0, 3, 4, 6, 9, 10, 12, 16, 17, 18, 19, 21]
        else:
            selector = list(range(max_perms))
        perm_pool = torch.as_tensor(list(permutations(range(max_num_chars), max_num_chars)), device=self._device)[selector]
        # If the forward permutation is always selected, no need to add it to the pool for sampling
        if self.perm_forward:
            perm_pool = perm_pool[1:]
        perms = torch.stack(perms)
        if len(perm_pool):
            i = self.rng.choice(len(perm_pool), size=num_gen_perms - len(perms), replace=False)
            perms = torch.cat([perms, perm_pool[i]])
    else:
        perms.extend([torch.randperm(max_num_chars, device=self._device) for _ in range(num_gen_perms - len(perms))])
        perms = torch.stack(perms)
    if self.perm_mirrored:
        # Add complementary pairs
        comp = perms.flip(-1)
        # Stack in such a way that the pairs are next to each other.
        perms = torch.stack([perms, comp]).transpose(0, 1).reshape(-1, max_num_chars)
    # NOTE:
    # The only meaningful way of permuting the EOS position is by moving it one character position at a time.
    # However, since the number of permutations = T! and number of EOS positions = T + 1, the number of possible EOS
    # positions will always be much less than the number of permutations (unless a low perm_num is set).
    # Thus, it would be simpler to just train EOS using the full and null contexts rather than trying to evenly
    # distribute it across the chosen number of permutations.
    # Add position indices of BOS and EOS
    bos_idx = perms.new_zeros((len(perms), 1))
    eos_idx = perms.new_full((len(perms), 1), max_num_chars + 1)
    perms = torch.cat([bos_idx, perms + 1, eos_idx], dim=1)
    # Special handling for the reverse direction. This does two things:
    # 1. Reverse context for the characters
    # 2. Null context for [EOS] (required for learning to predict [EOS] in NAR mode)
    if len(perms) > 1:
        perms[1, 1:] = max_num_chars + 1 - torch.arange(max_num_chars + 1, device=self._device)
    return perms
def generate_attn_masks(self, perm):
    """Generate attention masks given a sequence permutation (includes pos. for bos and eos tokens)

    :param perm: the permutation sequence. i = 0 is always the BOS
    :return: (content_mask, query_mask) lookahead attention masks
    """
    sz = perm.shape[0]
    mask = torch.zeros((sz, sz), device=self._device)
    for pos, query_idx in enumerate(perm):
        # keys that appear later in the permutation order are hidden from this query
        mask[query_idx, perm[pos + 1:]] = float('-inf')
    content_mask = mask[:-1, :-1].clone()
    # the query stream must also not see itself
    mask[torch.eye(sz, dtype=torch.bool, device=self._device)] = float('-inf')
    query_mask = mask[1:, :-1]
    return content_mask, query_mask
def configure_optimizers(self):
    """AdamW with four parameter groups (encoder/decoder x decay/no-decay)
    plus a per-group OneCycle LR schedule stepped every batch.

    Norm/bias/gain-like parameters get no weight decay; the decoder groups
    scale lr by coef_lr and weight decay by coef_wd.
    """
    agb = self.trainer.accumulate_grad_batches
    # Linear scaling so that the effective learning rate is constant regardless of the number of GPUs used with DDP.
    # lr_scale = agb * math.sqrt(self.trainer.num_devices) * self.batch_size / 256.
    lr_scale = agb * (self.trainer.num_devices * self.batch_size) / 512
    lr = lr_scale * self.lr
    # decay-exemption rule, following open_clip:
    # https://github.com/mlfoundations/open_clip/blob/b4cf9269b0b11c0eea47cb16039369a46bd67449/src/training/main.py#L171
    exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n \
        or "pos_queries" in n or "text_embed" in n
    include = lambda n, p: not exclude(n, p)
    # encoder parameters (the CLIP backbone)
    encoder_params = list(self.clip_model.named_parameters())
    enc_gain_or_bias_params = [p for n, p in encoder_params if exclude(n, p) and p.requires_grad]
    enc_rest_params = [p for n, p in encoder_params if include(n, p) and p.requires_grad]
    # decoder parameters (everything that is not part of the CLIP backbone)
    decoder_params = [(n, p) for n, p in list(self.named_parameters()) if "clip_model" not in n]
    dec_gain_or_bias_params = [p for n, p in decoder_params if exclude(n, p) and p.requires_grad]
    dec_rest_params = [p for n, p in decoder_params if include(n, p) and p.requires_grad]
    rank_zero_info("[VL4STR] The length of encoder params with and without weight decay is {} and {}, respectively.".format(
        len(enc_rest_params), len(enc_gain_or_bias_params)
    ))
    rank_zero_info("[VL4STR] The length of decoder params with and without weight decay is {} and {}, respectively.".format(
        len(dec_rest_params), len(dec_gain_or_bias_params)
    ))
    optimizer = torch.optim.AdamW(
        [
            {"params": enc_gain_or_bias_params, "weight_decay": 0., 'lr': lr},
            {"params": enc_rest_params, "weight_decay": self.weight_decay, 'lr': lr},
            {"params": dec_gain_or_bias_params, "weight_decay": 0., 'lr': lr * self.coef_lr},
            {"params": dec_rest_params, "weight_decay": self.weight_decay * self.coef_wd, 'lr': lr * self.coef_lr},
        ],
        lr=lr, betas=(0.9, 0.98), eps=1.0e-6,
    )
    # one max_lr per parameter group, stepped per optimizer step
    sched = OneCycleLR(optimizer, [lr, lr, lr * self.coef_lr, lr * self.coef_lr],
                       self.trainer.estimated_stepping_batches, pct_start=self.warmup_pct,
                       cycle_momentum=False)
    return {'optimizer': optimizer, 'lr_scheduler': {'scheduler': sched, 'interval': 'step'}}
def _freeze_backbones(self):
    """Put frozen CLIP transformer blocks (and, optionally, the whole text
    backbone) into eval mode so dropout/norm statistics stay fixed."""
    for name, mod in self.clip_model.named_modules():
        is_visual_block = name.startswith("visual.transformer.resblocks.")
        is_text_block = name.startswith("transformer.resblocks.")
        if not (is_visual_block or is_text_block):
            continue
        layer_num = int(name.split(".resblocks.")[1].split(".")[0])
        frozen_limit = self.image_freeze_nlayer if is_visual_block else self.text_freeze_nlayer
        if layer_num < frozen_limit:
            mod.eval()
    if self.freeze_language_backbone:
        self.clip_model.transformer.eval()
        self.clip_model.ln_final.eval()
def freeze_cip_layers(self, image_freeze_nlayer, text_freeze_nlayer, image_freeze_layer_divisor=-1, image_only_fc=False):
    """Freeze the parameters of CLIP layers with index < image_freeze_nlayer
    (visual tower) or < text_freeze_nlayer (text tower).

    A value of 12 (or more) freezes the entire corresponding tower, including
    its embeddings/projections. Top layers (ln_post/proj/conv1/ln_pre for the
    visual tower, ln_final/text_projection for the text tower) are otherwise
    always kept trainable.

    NOTE(review): `image_freeze_layer_divisor` and `image_only_fc` are
    currently unused (their handling is commented out below).
    """
    assert image_freeze_nlayer <= 12 and text_freeze_nlayer <=12 and image_freeze_layer_divisor <= 12
    if hasattr(self, "clip_model"):
        if image_freeze_nlayer > -1:
            for name, param in self.clip_model.visual.named_parameters():
                # top layers always need to train
                if name.startswith("ln_post.") or name.startswith("proj") or name.startswith("conv1") or name.startswith("ln_pre"):
                    continue
                elif name.startswith("transformer.resblocks."):
                    layer_num = int(name.split(".resblocks.")[1].split(".")[0])
                    if layer_num >= image_freeze_nlayer:
                        continue
                param.requires_grad = False
        #### freeze the layers which the index can be divided by image_freeze_layer_divisor
        # if image_freeze_layer_divisor > 0:
        #     for name, param in self.clip_model.visual.named_parameters():
        #         if name.startswith("transformer.resblocks."):
        #             layer_num = int(name.split(".resblocks.")[1].split(".")[0])
        #             if (layer_num + 1) % image_freeze_layer_divisor == 0:
        #                 param.requires_grad = False
        #### only train the top fc layer
        # if image_only_fc:
        #     for name, param in self.clip_model.visual.named_parameters():
        #         if "out_proj" in name or "conv1" in name or name.startswith("ln_post.") or name.startswith("proj"):
        #             continue
        #         param.requires_grad = False
        if text_freeze_nlayer > -1:
            for name, param in self.clip_model.named_parameters():
                # top layers always need to train
                if name.startswith("ln_final.") or name.startswith("text_projection") or name.startswith("visual"):
                    continue
                elif name.startswith("transformer.resblocks."):
                    layer_num = int(name.split(".resblocks.")[1].split(".")[0])
                    if layer_num >= text_freeze_nlayer:
                        continue
                param.requires_grad = False
        # freeze the whole backbones and related parameters
        if text_freeze_nlayer >= 12:
            for n, p in self.clip_model.named_parameters():
                # exclude visual parameters
                if "visual" not in n:
                    if "transformer" in n or "token_embedding" in n or "ln_final" in n or "text_projection" in n:
                        p.requires_grad = False
        if image_freeze_nlayer >= 12:
            for n, p in self.clip_model.visual.named_parameters():
                p.requires_grad = False
def train(self, mode=True):
    """Switch train/eval mode, then re-apply eval mode to the frozen backbones.

    Overrides nn.Module.train so frozen CLIP parts never silently flip back
    into training mode.
    """
    result = super().train(mode)
    if mode:
        self._freeze_backbones()
    return result
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/modules.py | strhub/models/vl_str/modules.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
from functools import partial
from typing import Optional, Sequence, Callable
import torch
from torch import nn as nn, Tensor
from torch.nn import functional as F
from torch.nn.modules import transformer
from timm.models.helpers import named_apply
from strhub.models.utils import init_weights
class DecoderLayer(nn.Module):
    """A Transformer decoder layer supporting two-stream attention (XLNet)
    This implements a pre-LN decoder, as opposed to the post-LN default in PyTorch."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='gelu',
                 layer_norm_eps=1e-5):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        # pre-LN norms: norm1/norm2 are used inside forward_stream,
        # norm_q/norm_c normalize the query and content streams in forward()
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm_q = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm_c = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = transformer._get_activation_fn(activation)

    def __setstate__(self, state):
        # older checkpoints may lack 'activation'; default to GELU
        if 'activation' not in state:
            state['activation'] = F.gelu
        super().__setstate__(state)

    def forward_stream(self, tgt: Tensor, tgt_norm: Tensor, tgt_kv: Tensor, memory: Tensor, attn_mask: Optional[Tensor],
                       key_padding_mask: Optional[Tensor]):
        """Forward pass for a single stream (i.e. content or query)
        tgt_norm is just a LayerNorm'd tgt. Added as a separate parameter for efficiency.
        Both tgt_kv and memory are expected to be LayerNorm'd too.
        memory is LayerNorm'd by ViT.
        """
        tgt2, sa_weights = self.self_attn(tgt_norm, tgt_kv, tgt_kv, attn_mask=attn_mask, key_padding_mask=key_padding_mask)
        tgt = tgt + self.dropout1(tgt2)

        tgt2, ca_weights = self.cross_attn(self.norm1(tgt), memory, memory)
        tgt = tgt + self.dropout2(tgt2)

        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(self.norm2(tgt)))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, sa_weights, ca_weights

    def forward(self, query, content, memory, tgt_query_mask: Optional[Tensor] = None, content_mask: Optional[Tensor] = None,
                content_key_padding_mask: Optional[Tensor] = None, update_content: bool = True):
        """Run both streams; the content stream update can be skipped (e.g. on the last layer)."""
        query_norm = self.norm_q(query)
        content_norm = self.norm_c(content)
        # query stream attends to the normalized content stream
        query = self.forward_stream(query, query_norm, content_norm, memory, tgt_query_mask, content_key_padding_mask)[0]
        if update_content:
            # content stream attends to itself under the content mask
            content = self.forward_stream(content, content_norm, content_norm, memory, content_mask, content_key_padding_mask)[0]
        return query, content
class Decoder(nn.Module):
    """A self-contained two-stream decoder: token embedding, learnable
    positional queries, a stack of DecoderLayers, and a classification head."""
    __constants__ = ['norm']

    def __init__(self, decoder_layer, num_layers, norm, embed_dim=512, dropout=0.0, num_classes=94,
                 charset_size=94, max_label_length=25):
        """a self-contained decoder for character extraction"""
        super().__init__()
        self.layers = transformer._get_clones(decoder_layer, num_layers)
        self.text_embed = TokenEmbedding(charset_size, embed_dim)
        # one learnable positional query per output position
        self.pos_queries = nn.Parameter(torch.Tensor(1, max_label_length, embed_dim))
        self.num_layers = num_layers
        self.norm = norm
        self.dropout = nn.Dropout(p=dropout)
        self.head = nn.Linear(embed_dim, num_classes, bias=True)

        named_apply(partial(init_weights, exclude=['none']), self)
        nn.init.trunc_normal_(self.pos_queries, std=.02)

    def forward(self, tgt, memory,
                tgt_query: Optional[Tensor] = None,
                tgt_query_mask: Optional[Tensor] = None,
                content_mask: Optional[Tensor] = None,
                content_key_padding_mask: Optional[Tensor] = None):
        """Decode tgt against memory; returns (logits, query features)."""
        N, L = tgt.shape
        # <bos> stands for the null context. We only supply position information for characters after <bos>.
        null_ctx = self.text_embed(tgt[:, :1])
        tgt_emb = self.pos_queries[:, :L - 1] + self.text_embed(tgt[:, 1:])
        content = self.dropout(torch.cat([null_ctx, tgt_emb], dim=1))
        if tgt_query is None:
            tgt_query = self.pos_queries[:, :L].expand(N, -1, -1)
        query = self.dropout(tgt_query)
        # forward layers; the last layer skips the content-stream update (its output is unused)
        for i, mod in enumerate(self.layers):
            last = i == len(self.layers) - 1
            query, content = mod(query, content, memory, tgt_query_mask, content_mask, content_key_padding_mask,
                                 update_content=not last)
        query = self.norm(query)
        # prediction
        logits = self.head(query)
        # return prediction and feature
        return logits, query
class TokenEmbedding(nn.Module):
    """Token embedding scaled by sqrt(embed_dim), per the Transformer convention."""

    def __init__(self, charset_size: int, embed_dim: int):
        super().__init__()
        self.embedding = nn.Embedding(charset_size, embed_dim)
        self.embed_dim = embed_dim

    def forward(self, tokens: torch.Tensor):
        scale = math.sqrt(self.embed_dim)
        return self.embedding(tokens) * scale
class Hook():
    """Capture a layer's input and output during its forward (or backward) pass."""

    def __init__(self, module, backward=False):
        register = module.register_backward_hook if backward else module.register_forward_hook
        self.hook = register(self.hook_fn)

    def hook_fn(self, module, input, output):
        # stash the most recent call's tensors for later inspection
        self.input = input
        self.output = output

    def close(self):
        self.hook.remove()
def modify_attn_mask(m, context_length=10):
    """Truncate a module's cached attention mask to `context_length` positions.

    No-op for modules without an `attn_mask` attribute or with a None mask.
    """
    mask = getattr(m, "attn_mask", None)
    if mask is not None:
        m.attn_mask = mask[:context_length, :context_length]
class FusionFC(nn.Module):
    """Gated fusion of two feature streams followed by a classification head."""

    def __init__(self, d_model, num_classes):
        super().__init__()
        self.w_att = nn.Linear(2 * d_model, d_model)
        self.cls = nn.Linear(d_model, num_classes)
        # init
        for module in self.modules():
            init_weights(module)

    def forward(self, feature1, feature2, detach=True):
        """Fuse two (N, T, E) feature tensors with a learned sigmoid gate.

        With detach=True the inputs are detached first, so no gradient flows
        back into either branch through the fusion head.
        Returns classification logits of shape (N, T, C).
        """
        if detach:
            feature1 = feature1.detach().clone()
            feature2 = feature2.detach().clone()
        gate = torch.sigmoid(self.w_att(torch.cat((feature1, feature2), dim=2)))
        fused = gate * feature2 + (1.0 - gate) * feature1
        return self.cls(fused)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/loss.py | strhub/models/vl_str/loss.py | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
def gather_features(
        image_features,
        text_features,
        local_loss=False,
        gather_with_grad=False,
        rank=0,
        world_size=1,
        use_horovod=False,
):
    """Gather image/text feature tensors from all DDP ranks.

    :param local_loss: when False, this rank's differentiable tensors are
        re-inserted into the gathered lists so gradients can flow locally
        (plain all_gather returns non-differentiable copies).
    :param gather_with_grad: use torch.distributed.nn.all_gather, which keeps
        the autograd graph across ranks.
    :raises NotImplementedError: if use_horovod is requested.
    :return: (all_image_features, all_text_features) concatenated over ranks.
    """
    if use_horovod:
        raise NotImplementedError
    else:
        # We gather tensors from all gpus
        if gather_with_grad:
            all_image_features = torch.cat(
                torch.distributed.nn.all_gather(image_features), dim=0
            )
            all_text_features = torch.cat(
                torch.distributed.nn.all_gather(text_features), dim=0
            )
        else:
            gathered_image_features = [
                torch.zeros_like(image_features) for _ in range(world_size)
            ]
            gathered_text_features = [
                torch.zeros_like(text_features) for _ in range(world_size)
            ]
            dist.all_gather(gathered_image_features, image_features)
            dist.all_gather(gathered_text_features, text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
            all_image_features = torch.cat(gathered_image_features, dim=0)
            all_text_features = torch.cat(gathered_text_features, dim=0)
    return all_image_features, all_text_features
class ClipLoss(nn.Module):
    """Symmetric InfoNCE (CLIP) contrastive loss with optional multi-GPU
    feature gathering and ground-truth label caching."""

    def __init__(
            self,
            local_loss=False,
            gather_with_grad=False,
            cache_labels=False,
            rank=0,
            world_size=1,
            use_horovod=False,
    ):
        super().__init__()
        self.local_loss = local_loss
        self.gather_with_grad = gather_with_grad
        self.cache_labels = cache_labels
        self.rank = rank
        self.world_size = world_size
        self.use_horovod = use_horovod

        # cache state: label tensors keyed by device, valid while batch size is stable
        self.prev_num_logits = 0
        self.labels = {}

    def forward(self, image_features, text_features, logit_scale):
        """Compute the averaged image->text and text->image cross-entropy.

        :param logit_scale: temperature multiplier applied to the similarity logits.
        """
        device = image_features.device
        if self.world_size > 1:
            all_image_features, all_text_features = gather_features(
                image_features,
                text_features,
                self.local_loss,
                self.gather_with_grad,
                self.rank,
                self.world_size,
                self.use_horovod,
            )

            if self.local_loss:
                # each rank scores its own features against the global gallery
                logits_per_image = logit_scale * image_features @ all_text_features.T
                logits_per_text = logit_scale * text_features @ all_image_features.T
            else:
                logits_per_image = (
                    logit_scale * all_image_features @ all_text_features.T
                )
                logits_per_text = logits_per_image.T
        else:
            logits_per_image = logit_scale * image_features @ text_features.T
            logits_per_text = logit_scale * text_features @ image_features.T

        # calculated ground-truth and cache if enabled
        num_logits = logits_per_image.shape[0]
        if self.prev_num_logits != num_logits or device not in self.labels:
            labels = torch.arange(num_logits, device=device, dtype=torch.long)
            if self.world_size > 1 and self.local_loss:
                # offset diagonal targets by this rank's position in the gallery
                labels = labels + num_logits * self.rank
            if self.cache_labels:
                self.labels[device] = labels
                self.prev_num_logits = num_logits
        else:
            labels = self.labels[device]

        total_loss = (
            F.cross_entropy(logits_per_image, labels)
            + F.cross_entropy(logits_per_text, labels)
        ) / 2
        return total_loss
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vl_str/__init__.py | strhub/models/vl_str/__init__.py | # coding=utf-8 | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/str_adapter/system.py | strhub/models/str_adapter/system.py | # CLIP Adapters for STR
import os
import math
import warnings
from functools import partial
from itertools import permutations
from typing import Sequence, Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.optim.lr_scheduler import OneCycleLR
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.utilities.types import STEP_OUTPUT
from timm.models.helpers import named_apply
from strhub.clip import clip
from strhub.models.utils import init_weights
from strhub.models.base import CrossEntropySystem
from .modules import DecoderLayer, Decoder, TokenEmbedding
from .modules import IndentityAdapter, LinearAdapter, AttentionAdapter
from .modules import LadderSideAdapter, LinearLadderSideAdapter, LadderSideAdapterPruning
# an alternative choice when the input argument is not valid
CLIP_PATH = '/PUT/YOUR/PATH/HERE/pretrained/clip'
class STRAdapter(CrossEntropySystem):
def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
             batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
             img_size: Sequence[int], patch_size: Sequence[int],
             dec_num_heads: int, dec_mlp_ratio: int, dec_depth: int,
             perm_num: int, perm_forward: bool, perm_mirrored: bool,
             decode_ar: bool, refine_iters: int, dropout: float, **kwargs: Any) -> None:
    """CLIP encoder + adapter + PARSeq-style decoder for scene text recognition.

    Notable kwargs: clip_pretrained (required, path to CLIP weights),
    coef_lr / coef_wd, freeze_backbone, adapter_type, block_ids, prune_reduction.
    """
    super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
    self.save_hyperparameters()
    # decoding options
    self.max_label_length = max_label_length
    self.decode_ar = decode_ar
    self.refine_iters = refine_iters
    # optional kwargs with defaults (kwargs.get replaces the manual key checks)
    self.coef_lr = kwargs.get("coef_lr", 1.0)
    self.coef_wd = kwargs.get("coef_wd", 1.0)
    self.freeze_backbone = kwargs.get("freeze_backbone", False)
    self.adapter_type = kwargs.get("adapter_type", "linear")
    self.block_ids = kwargs.get("block_ids", [0, 4, 8, 11])
    self.prune_reduction = kwargs.get("prune_reduction", 4)
    rank_zero_info("[STRAdapter] freeze_backbone {}, adapter_type {}, block_ids {}".format(
        self.freeze_backbone, self.adapter_type, self.block_ids))

    # Resolve the CLIP checkpoint path *before* loading. Previously the
    # CLIP_PATH fallback and the existence assert ran only after clip.load()
    # had already consumed the original (possibly missing) path, so the
    # fallback was dead code.
    assert "clip_pretrained" in kwargs.keys()
    if not os.path.exists(kwargs["clip_pretrained"]):
        kwargs["clip_pretrained"] = os.path.join(CLIP_PATH, os.path.basename(kwargs["clip_pretrained"]))
    assert os.path.exists(kwargs["clip_pretrained"])
    # load CLIP model (fp32; Lightning moves it to the target device later)
    clip_model, _ = clip.load(name=kwargs["clip_pretrained"], device='cpu')
    self.clip_model = clip_model.float()

    embed_dim = self.clip_model.text_projection.shape[-1]
    # adapter on top of the (possibly frozen) CLIP image features
    if self.adapter_type == "linear":
        self.adapter = LinearAdapter(embed_dim, reduction=2, ratio=0.2)
    elif self.adapter_type == "attention":
        self.adapter = AttentionAdapter(embed_dim, dec_num_heads, ratio=0.5)
    elif self.adapter_type == "ladder":
        self.adapter = LadderSideAdapter(self.clip_model, block_ids=self.block_ids)
    elif self.adapter_type == "linear_ladder":
        self.adapter = LinearLadderSideAdapter(self.clip_model, block_ids=self.block_ids)
    elif self.adapter_type == "ladder_pruning":
        self.adapter = LadderSideAdapterPruning(self.clip_model, block_ids=self.block_ids, reduction=self.prune_reduction)
    else:
        warnings.warn("Warning: you are using IndentityAdapter")
        self.adapter = IndentityAdapter()

    # decoder module
    decoder_layer = DecoderLayer(embed_dim, dec_num_heads, embed_dim * dec_mlp_ratio, dropout)
    self.decoder = Decoder(decoder_layer, num_layers=dec_depth, norm=nn.LayerNorm(embed_dim))

    # Perm/attn mask stuff
    self.rng = np.random.default_rng()
    self.max_gen_perms = perm_num // 2 if perm_mirrored else perm_num
    self.perm_forward = perm_forward
    self.perm_mirrored = perm_mirrored

    # We don't predict <bos> nor <pad>
    self.head = nn.Linear(embed_dim, len(self.tokenizer) - 2, bias=True)
    self.text_embed = TokenEmbedding(len(self.tokenizer), embed_dim)
    # +1 for <eos>
    self.pos_queries = nn.Parameter(torch.Tensor(1, max_label_length + 1, embed_dim))
    self.dropout = nn.Dropout(p=dropout)
    # init decoder only; leave the pretrained CLIP weights and adapter untouched
    named_apply(partial(init_weights, exclude=['clip_model', 'adapter']), self)
    nn.init.trunc_normal_(self.pos_queries, std=.02)
def encode(self, img: torch.Tensor):
if self.freeze_backbone:
self.clip_model.eval()
with torch.no_grad():
memory = self.clip_model.encode_image(img)
else:
memory = self.clip_model.encode_image(img)
memory = self.adapter(memory)
return memory
    def decode(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[Tensor] = None,
               tgt_padding_mask: Optional[Tensor] = None, tgt_query: Optional[Tensor] = None,
               tgt_query_mask: Optional[Tensor] = None):
        """Run the two-stream decoder over the target tokens.

        Args:
            tgt: (N, L) target token ids, starting with <bos>.
            memory: encoder output (image features) attended to via cross-attention.
            tgt_mask: additive attention mask for the content stream.
            tgt_padding_mask: key-padding mask for the target tokens.
            tgt_query: optional positional queries; defaults to the learned
                position embeddings for the first L positions.
            tgt_query_mask: additive attention mask for the query stream.
        """
        N, L = tgt.shape
        # <bos> stands for the null context. We only supply position information for characters after <bos>.
        null_ctx = self.text_embed(tgt[:, :1])
        tgt_emb = self.pos_queries[:, :L - 1] + self.text_embed(tgt[:, 1:])
        tgt_emb = self.dropout(torch.cat([null_ctx, tgt_emb], dim=1))
        if tgt_query is None:
            tgt_query = self.pos_queries[:, :L].expand(N, -1, -1)
        tgt_query = self.dropout(tgt_query)
        return self.decoder(tgt_query, tgt_emb, memory, tgt_query_mask, tgt_mask, tgt_padding_mask)
    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        """Predict character logits for a batch of images.

        At test time (``max_length is None``) autoregressive decoding may stop
        early once every sequence has emitted <eos>. AR decoding and iterative
        refinement are gated by ``self.decode_ar`` / ``self.refine_iters``.

        Returns:
            (N, steps, num_classes) logits over the character set.
        """
        testing = max_length is None
        max_length = self.max_label_length if max_length is None else min(max_length, self.max_label_length)
        bs = images.shape[0]
        # +1 for <eos> at end of sequence.
        num_steps = max_length + 1
        memory = self.encode(images)
        # Query positions up to `num_steps`
        pos_queries = self.pos_queries[:, :num_steps].expand(bs, -1, -1)
        # Special case for the forward permutation. Faster than using `generate_attn_masks()`
        tgt_mask = query_mask = torch.triu(torch.full((num_steps, num_steps), float('-inf'), device=self._device), 1)
        if self.decode_ar:
            tgt_in = torch.full((bs, num_steps), self.pad_id, dtype=torch.long, device=self._device)
            tgt_in[:, 0] = self.bos_id
            logits = []
            for i in range(num_steps):
                j = i + 1 # next token index
                # Efficient decoding:
                # Input the context up to the ith token. We use only one query (at position = i) at a time.
                # This works because of the lookahead masking effect of the canonical (forward) AR context.
                # Past tokens have no access to future tokens, hence are fixed once computed.
                tgt_out = self.decode(tgt_in[:, :j], memory, tgt_mask[:j, :j], tgt_query=pos_queries[:, i:j],
                                      tgt_query_mask=query_mask[i:j, :j])
                # the next token probability is in the output's ith token position
                p_i = self.head(tgt_out)
                logits.append(p_i)
                if j < num_steps:
                    # greedy decode. add the next token index to the target input
                    tgt_in[:, j] = p_i.squeeze().argmax(-1)
                    # Efficient batch decoding: If all output words have at least one EOS token, end decoding.
                    if testing and (tgt_in == self.eos_id).any(dim=-1).all():
                        break
            logits = torch.cat(logits, dim=1)
        else:
            # No prior context, so input is just <bos>. We query all positions.
            tgt_in = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
            tgt_out = self.decode(tgt_in, memory, tgt_query=pos_queries)
            logits = self.head(tgt_out)
        if self.refine_iters:
            # For iterative refinement, we always use a 'cloze' mask.
            # We can derive it from the AR forward mask by unmasking the token context to the right.
            query_mask[torch.triu(torch.ones(num_steps, num_steps, dtype=torch.bool, device=self._device), 2)] = 0
            bos = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
            for i in range(self.refine_iters):
                # Prior context is the previous output.
                tgt_in = torch.cat([bos, logits[:, :-1].argmax(-1)], dim=1)
                tgt_padding_mask = ((tgt_in == self.eos_id).cumsum(-1) > 0) # mask tokens beyond the first EOS token.
                tgt_out = self.decode(tgt_in, memory, tgt_mask, tgt_padding_mask,
                                      tgt_query=pos_queries, tgt_query_mask=query_mask[:, :tgt_in.shape[1]])
                logits = self.head(tgt_out)
        return logits
    def gen_tgt_perms(self, tgt):
        """Generate shared permutations for the whole batch.
        This works because the same attention mask can be used for the shorter sequences
        because of the padding mask.

        Returns a (num_perms, max_num_chars + 2) tensor of position indices,
        where column 0 is always the BOS position and the last column is EOS.
        """
        # We don't permute the position of BOS, we permute EOS separately
        max_num_chars = tgt.shape[1] - 2
        # Special handling for 1-character sequences
        if max_num_chars == 1:
            return torch.arange(3, device=self._device).unsqueeze(0)
        perms = [torch.arange(max_num_chars, device=self._device)] if self.perm_forward else []
        # Additional permutations if needed
        max_perms = math.factorial(max_num_chars)
        if self.perm_mirrored:
            max_perms //= 2
        num_gen_perms = min(self.max_gen_perms, max_perms)
        # For 4-char sequences and shorter, we generate all permutations and sample from the pool to avoid collisions
        # Note that this code path might NEVER get executed since the labels in a mini-batch typically exceed 4 chars.
        if max_num_chars < 5:
            # Pool of permutations to sample from. We only need the first half (if complementary option is selected)
            # Special handling for max_num_chars == 4 which correctly divides the pool into the flipped halves
            if max_num_chars == 4 and self.perm_mirrored:
                selector = [0, 3, 4, 6, 9, 10, 12, 16, 17, 18, 19, 21]
            else:
                selector = list(range(max_perms))
            perm_pool = torch.as_tensor(list(permutations(range(max_num_chars), max_num_chars)), device=self._device)[selector]
            # If the forward permutation is always selected, no need to add it to the pool for sampling
            if self.perm_forward:
                perm_pool = perm_pool[1:]
            perms = torch.stack(perms)
            if len(perm_pool):
                i = self.rng.choice(len(perm_pool), size=num_gen_perms - len(perms), replace=False)
                perms = torch.cat([perms, perm_pool[i]])
        else:
            # Long labels: sample random permutations directly (collisions unlikely).
            perms.extend([torch.randperm(max_num_chars, device=self._device) for _ in range(num_gen_perms - len(perms))])
            perms = torch.stack(perms)
        if self.perm_mirrored:
            # Add complementary pairs
            comp = perms.flip(-1)
            # Stack in such a way that the pairs are next to each other.
            perms = torch.stack([perms, comp]).transpose(0, 1).reshape(-1, max_num_chars)
        # NOTE:
        # The only meaningful way of permuting the EOS position is by moving it one character position at a time.
        # However, since the number of permutations = T! and number of EOS positions = T + 1, the number of possible EOS
        # positions will always be much less than the number of permutations (unless a low perm_num is set).
        # Thus, it would be simpler to just train EOS using the full and null contexts rather than trying to evenly
        # distribute it across the chosen number of permutations.
        # Add position indices of BOS and EOS
        bos_idx = perms.new_zeros((len(perms), 1))
        eos_idx = perms.new_full((len(perms), 1), max_num_chars + 1)
        perms = torch.cat([bos_idx, perms + 1, eos_idx], dim=1)
        # Special handling for the reverse direction. This does two things:
        # 1. Reverse context for the characters
        # 2. Null context for [EOS] (required for learning to predict [EOS] in NAR mode)
        if len(perms) > 1:
            perms[1, 1:] = max_num_chars + 1 - torch.arange(max_num_chars + 1, device=self._device)
        return perms
def generate_attn_masks(self, perm):
"""Generate attention masks given a sequence permutation (includes pos. for bos and eos tokens)
:param perm: the permutation sequence. i = 0 is always the BOS
:return: lookahead attention masks
"""
sz = perm.shape[0]
mask = torch.zeros((sz, sz), device=self._device)
for i in range(sz):
query_idx = perm[i]
masked_keys = perm[i + 1:]
mask[query_idx, masked_keys] = float('-inf')
content_mask = mask[:-1, :-1].clone()
mask[torch.eye(sz, dtype=torch.bool, device=self._device)] = float('-inf') # mask "self"
query_mask = mask[1:, :-1]
return content_mask, query_mask
    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        """One PARSeq-style training step: average cross-entropy over several
        permutation orderings of the target sequence. Logs and returns `loss`."""
        images, labels = batch
        tgt = self.tokenizer.encode(labels, self._device)
        # Encode the source sequence (i.e. the image codes)
        memory = self.encode(images)
        # Prepare the target sequences (input and output)
        tgt_perms = self.gen_tgt_perms(tgt)
        tgt_in = tgt[:, :-1]
        tgt_out = tgt[:, 1:]
        # The [EOS] token is not depended upon by any other token in any permutation ordering
        tgt_padding_mask = (tgt_in == self.pad_id) | (tgt_in == self.eos_id)
        loss = 0
        loss_numel = 0
        # n = number of non-pad target tokens; used to weight each perm's loss.
        n = (tgt_out != self.pad_id).sum().item()
        for i, perm in enumerate(tgt_perms):
            tgt_mask, query_mask = self.generate_attn_masks(perm)
            out = self.decode(tgt_in, memory, tgt_mask, tgt_padding_mask, tgt_query_mask=query_mask)
            logits = self.head(out).flatten(end_dim=1)
            loss += n * F.cross_entropy(logits, tgt_out.flatten(), ignore_index=self.pad_id)
            loss_numel += n
            # After the second iteration (i.e. done with canonical and reverse orderings),
            # remove the [EOS] tokens for the succeeding perms
            if i == 1:
                tgt_out = torch.where(tgt_out == self.eos_id, self.pad_id, tgt_out)
                n = (tgt_out != self.pad_id).sum().item()
        loss /= loss_numel
        self.log('loss', loss)
        return loss
    def configure_optimizers(self):
        """Build AdamW with four param groups (encoder/decoder x with/without
        weight decay) plus a OneCycleLR schedule stepped per batch.

        The decoder-side groups use ``lr * self.coef_lr`` and
        ``weight_decay * self.coef_wd``; gains/biases get no weight decay.
        """
        agb = self.trainer.accumulate_grad_batches
        # Linear scaling so that the effective learning rate is constant regardless of the number of GPUs used with DDP.
        lr_scale = agb * math.sqrt(self.trainer.num_devices) * self.batch_size / 256.
        lr = lr_scale * self.lr
        # optim = create_optimizer_v2(self, 'adamw', lr, self.weight_decay)
        # https://github.com/mlfoundations/open_clip/blob/b4cf9269b0b11c0eea47cb16039369a46bd67449/src/training/main.py#L171
        exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n
        include = lambda n, p: not exclude(n, p)
        # encoder parameters
        if self.adapter_type in ["ladder", "ladder_pruning"]:
            encoder_params = list(self.adapter.named_parameters())
            enc_gain_or_bias_params = [p for n, p in encoder_params if exclude(n, p) and p.requires_grad]
            enc_rest_params = [p for n, p in encoder_params if include(n, p) and p.requires_grad]
        else:
            enc_gain_or_bias_params = []
            enc_rest_params = []
        # decoder and adapter parameters
        decoder_params = list(self.decoder.named_parameters())
        if self.adapter_type not in ["ladder", "ladder_pruning"]:
            decoder_params += list(self.adapter.named_parameters())
        dec_gain_or_bias_params = [p for n, p in decoder_params if exclude(n, p) and p.requires_grad]
        dec_rest_params = [p for n, p in decoder_params if include(n, p) and p.requires_grad]
        # following PARSEQ pos_queries, no weight decay
        dec_gain_or_bias_params.append(self.pos_queries)
        dec_gain_or_bias_params.append(self.head.bias)
        dec_gain_or_bias_params.append(self.text_embed.embedding.weight)
        dec_rest_params.append(self.head.weight)
        rank_zero_info("[STRAdapter] The length of encoder params with and without weight decay is {} and {}, respectively.".format(
            len(enc_rest_params), len(enc_gain_or_bias_params)
        ))
        rank_zero_info("[STRAdapter] The length of decoder params with and without weight decay is {} and {}, respectively.".format(
            len(dec_rest_params), len(dec_gain_or_bias_params)
        ))
        optimizer = torch.optim.AdamW(
            [
                {"params": enc_gain_or_bias_params, "weight_decay": 0., 'lr': lr},
                {"params": enc_rest_params, "weight_decay": self.weight_decay, 'lr': lr},
                {"params": dec_gain_or_bias_params, "weight_decay": 0., 'lr': lr * self.coef_lr},
                {"params": dec_rest_params, "weight_decay": self.weight_decay * self.coef_wd, 'lr': lr * self.coef_lr},
            ],
            lr=lr, betas=(0.9, 0.98), eps=1.0e-6,
        )
        # One max_lr per param group, in the same order as above.
        sched = OneCycleLR(optimizer, [lr, lr, lr * self.coef_lr, lr * self.coef_lr],
                           self.trainer.estimated_stepping_batches, pct_start=self.warmup_pct,
                           cycle_momentum=False)
        return {'optimizer': optimizer, 'lr_scheduler': {'scheduler': sched, 'interval': 'step'}}
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/str_adapter/modules.py | strhub/models/str_adapter/modules.py | # coding=utf-8
import os
import copy
import math
from functools import partial
from collections import OrderedDict
from typing import Optional, Sequence, Callable, Tuple
from timm.models.helpers import named_apply
import torch
from torch import nn as nn, Tensor
from torch.nn import functional as F
from torch.nn.functional import _in_projection_packed
from torch.nn.modules import transformer
from strhub.clip.model import ResidualAttentionBlock, LayerNorm, QuickGELU
from strhub.models.utils import init_weights
class DecoderLayer(nn.Module):
    """A Transformer decoder layer supporting two-stream attention (XLNet)
    This implements a pre-LN decoder, as opposed to the post-LN default in PyTorch."""
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='gelu',
                 layer_norm_eps=1e-5):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        # Separate pre-norms for the query and content streams.
        self.norm_q = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm_c = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = transformer._get_activation_fn(activation)
    def __setstate__(self, state):
        # Backward compatibility when unpickling objects saved without 'activation'.
        if 'activation' not in state:
            state['activation'] = F.gelu
        super().__setstate__(state)
    def forward_stream(self, tgt: Tensor, tgt_norm: Tensor, tgt_kv: Tensor, memory: Tensor, tgt_mask: Optional[Tensor],
                       tgt_key_padding_mask: Optional[Tensor]):
        """Forward pass for a single stream (i.e. content or query)
        tgt_norm is just a LayerNorm'd tgt. Added as a separate parameter for efficiency.
        Both tgt_kv and memory are expected to be LayerNorm'd too.
        memory is LayerNorm'd by ViT.
        """
        # Pre-LN self-attention with residual connection.
        tgt2, sa_weights = self.self_attn(tgt_norm, tgt_kv, tgt_kv, attn_mask=tgt_mask,
                                          key_padding_mask=tgt_key_padding_mask)
        tgt = tgt + self.dropout1(tgt2)
        # Cross-attention over the encoder memory, then the feed-forward block.
        tgt2, ca_weights = self.cross_attn(self.norm1(tgt), memory, memory)
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(self.norm2(tgt)))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, sa_weights, ca_weights
    def forward(self, query, content, memory, query_mask: Optional[Tensor] = None, content_mask: Optional[Tensor] = None,
                content_key_padding_mask: Optional[Tensor] = None, update_content: bool = True):
        query_norm = self.norm_q(query)
        content_norm = self.norm_c(content)
        # Query stream reads from the content stream; content stream updates
        # itself unless update_content is False (the last decoder layer).
        query = self.forward_stream(query, query_norm, content_norm, memory, query_mask, content_key_padding_mask)[0]
        if update_content:
            content = self.forward_stream(content, content_norm, content_norm, memory, content_mask,
                                          content_key_padding_mask)[0]
        return query, content
class Decoder(nn.Module):
    """Stack of two-stream decoder layers followed by a final norm.
    The content stream is refreshed by every layer except the last one,
    whose content output would never be consumed.
    """
    __constants__ = ['norm']
    def __init__(self, decoder_layer, num_layers, norm):
        super().__init__()
        self.layers = transformer._get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
    def forward(self, query, content, memory, query_mask: Optional[Tensor] = None, content_mask: Optional[Tensor] = None,
                content_key_padding_mask: Optional[Tensor] = None):
        total = len(self.layers)
        for idx, layer in enumerate(self.layers):
            # Skip the (unused) content-stream update on the final layer.
            query, content = layer(query, content, memory, query_mask, content_mask,
                                   content_key_padding_mask, update_content=idx < total - 1)
        return self.norm(query)
class TokenEmbedding(nn.Module):
    """Token embedding scaled by sqrt(embed_dim), as in 'Attention Is All You Need'."""
    def __init__(self, charset_size: int, embed_dim: int):
        super().__init__()
        self.embedding = nn.Embedding(charset_size, embed_dim)
        self.embed_dim = embed_dim
    def forward(self, tokens: torch.Tensor):
        scale = math.sqrt(self.embed_dim)
        return self.embedding(tokens) * scale
class Hook():
    """Capture a module's input and output during its forward (or backward) pass.
    Attributes ``input`` and ``output`` are (re)set each time the hooked pass
    runs. Call ``close()`` to detach the hook when done.
    """
    def __init__(self, module, backward=False):
        # NOTE: register_backward_hook is deprecated in recent PyTorch in favor
        # of register_full_backward_hook; kept to preserve existing behavior.
        if not backward:
            self.hook = module.register_forward_hook(self.hook_fn)
        else:
            self.hook = module.register_backward_hook(self.hook_fn)
    def hook_fn(self, module, input, output):
        # Stash the latest observed tensors for later retrieval.
        self.input = input
        self.output = output
    def close(self):
        """Remove the registered hook."""
        self.hook.remove()
class IndentityAdapter(nn.Module):
    """No-op adapter: returns its input unchanged (keeps the adapter call API).
    Accepts and ignores any constructor arguments.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.identity = nn.Identity()
    def forward(self, x):
        out = self.identity(x)
        return out
class LinearAdapter(nn.Module):
    """Bottleneck MLP adapter (CLIP-Adapter style) with a residual blend.
    Output is ``ratio * fc(x) + (1 - ratio) * x``.
    See https://github.com/gaopengcuhk/CLIP-Adapter/blob/main/clip_adapter.py
    """
    def __init__(self, c_in, reduction=2, ratio=0.2):
        super().__init__()
        hidden = c_in // reduction
        self.fc = nn.Sequential(
            nn.Linear(c_in, hidden, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, c_in, bias=False),
            nn.ReLU(inplace=True)
        )
        self.ratio = ratio
        # Apply the project-wide initialization scheme to every submodule.
        for module in self.modules():
            init_weights(module)
    def forward(self, x):
        blended = self.ratio * self.fc(x) + (1.0 - self.ratio) * x
        return blended
class AttentionAdapter(nn.Module):
    """Residual self-attention adapter blending attended and raw features."""
    def __init__(self, c_in, n_head, ratio=0.5):
        super().__init__()
        self.attn = ResidualAttentionBlock(d_model=c_in, n_head=n_head)
        self.ratio = ratio
        # Initialize all parameters (nothing excluded).
        named_apply(partial(init_weights, exclude=['none']), self)
    def forward(self, x):
        # x: [N, L, D]; the attention block expects length-first [L, N, D].
        x = x.permute(1, 0, 2)
        x = self.ratio * self.attn(x) + (1.0 - self.ratio) * x
        return x.permute(1, 0, 2)
class LadderSideAdapter(nn.Module):
    def __init__(self, clip_model, block_ids=[0, 4, 8, 11], T=0.1) -> None:
        """
        A Ladder Side Adapter
        Args:
            transformer: a transformer object defined by CLIP
            block_ids: ids of blocks in the transformer
            T: temperature
        """
        super(LadderSideAdapter, self).__init__()
        self.T = T
        self.block_ids = block_ids
        # Learnable per-block gates; sigmoid(alpha / T) blends backbone vs. side features.
        self.alpha = nn.Parameter(torch.zeros(len(block_ids)), requires_grad=True)
        resblocks = []
        self.hooks = []
        for i in block_ids:
            # Forward hooks capture the inputs of the selected CLIP blocks at encode time.
            self.hooks.append(Hook(clip_model.visual.transformer.resblocks[i], backward=False))
            resblocks.append(copy.deepcopy(clip_model.visual.transformer.resblocks[i]))
        self.resblocks = nn.ModuleList(resblocks)
        self.ln_post = copy.deepcopy(clip_model.visual.ln_post)
        self.proj = copy.deepcopy(clip_model.visual.proj)
    def forward(self, memory=None):
        # x: LND in hooks
        # ladder side forward
        W = torch.sigmoid(self.alpha / self.T)
        for i, j in enumerate(self.block_ids):
            if i == 0:
                x = self.resblocks[i](self.hooks[i].input[0])
            else:
                # Blend the hooked backbone input into the running side output, then refine.
                x = W[i] * self.hooks[i].input[0] + (1.0 - W[i]) * x
                x = self.resblocks[i](x)
        x = x.permute(1, 0, 2) # LND -> NLD
        # output transformation
        x = self.ln_post(x)
        if self.proj is not None:
            x = torch.matmul(x, self.proj)
        if memory is not None:
            # Final gate between the backbone's own output and the side-network output.
            x = W[0] * memory + (1.0 - W[0]) * x
        return x
class LinearLadderSideAdapter(nn.Module):
    def __init__(self, clip_model, block_ids=[1, 3, 5, 7, 9, 11], T=0.1) -> None:
        """
        A Ladder Side Adapter
        Args:
            transformer: a transformer object defined by CLIP
            block_ids: ids of blocks in the transformer
            T: temperature
        """
        super(LinearLadderSideAdapter, self).__init__()
        self.T = T
        self.block_ids = block_ids
        # Learnable per-block gates; sigmoid(alpha / T) blends backbone vs. side features.
        self.alpha = nn.Parameter(torch.zeros(len(block_ids)), requires_grad=True)
        width = clip_model.visual.class_embedding.shape[0]
        resblocks = []
        self.hooks = []
        for i in block_ids:
            # Hooks capture block inputs; the side network uses cheap LinearAdapters
            # instead of full transformer blocks.
            self.hooks.append(Hook(clip_model.visual.transformer.resblocks[i], backward=False))
            resblocks.append(LinearAdapter(width, reduction=4, ratio=0.5))
        self.resblocks = nn.ModuleList(resblocks)
        self.ln_post = copy.deepcopy(clip_model.visual.ln_post)
        self.proj = copy.deepcopy(clip_model.visual.proj)
        # init
        named_apply(partial(init_weights, exclude=['ln_post', 'proj', 'alpha']), self)
    def forward(self, memory=None):
        # x: LND in hooks
        # ladder side forward
        W = torch.sigmoid(self.alpha / self.T)
        for i, j in enumerate(self.block_ids):
            if i == 0:
                x = self.resblocks[i](self.hooks[i].input[0])
            else:
                # Blend the hooked backbone input into the running side output, then refine.
                x = W[i] * self.hooks[i].input[0] + (1.0 - W[i]) * x
                x = self.resblocks[i](x)
        x = x.permute(1, 0, 2) # LND -> NLD
        # output transformation
        x = self.ln_post(x)
        if self.proj is not None:
            x = torch.matmul(x, self.proj)
        if memory is not None:
            # Final gate between the backbone's own output and the side-network output.
            x = W[0] * memory + (1.0 - W[0]) * x
        return x
class LadderSideAdapterPruning(nn.Module):
    def __init__(self, clip_model, block_ids=[1, 3, 5, 7, 9, 11], T=0.1, reduction=4.0) -> None:
        """
        A Ladder Side Adapter using the pruned model as the side network
        Args:
            clip_model: clip model
            block_ids: ids of blocks in the transformer
            T: temperature
            reduction: prune the row or columns of weight matrix to (1 / reduction) of the original
        """
        super(LadderSideAdapterPruning, self).__init__()
        self.T = T
        self.block_ids = block_ids
        # Learnable per-block gates; sigmoid(alpha / T) blends backbone vs. side features.
        self.alpha = nn.Parameter(torch.zeros(len(block_ids)), requires_grad=True)
        d_model = clip_model.visual.transformer.resblocks[0].attn.embed_dim
        n_head = clip_model.visual.transformer.resblocks[0].attn.num_heads
        resblocks = []
        downsamples = []
        self.hooks = []
        for i in block_ids:
            self.hooks.append(Hook(clip_model.visual.transformer.resblocks[i], backward=False))
            # prune and init
            # block = ResidualAttentionBlock(int(d_model / reduction), n_head)
            block = ResidualAttentionBlockCustom(d_model, n_head, reduction=reduction)
            state_dict = clip_model.visual.transformer.resblocks[i].state_dict()
            # new_state_dict = self.prune(state_dict, reduction=reduction)
            new_state_dict = self.prune_v2(state_dict, reduction=reduction)
            block.load_state_dict(new_state_dict)
            resblocks.append(block)
            # downsamples.append(nn.Linear(d_model, int(d_model / reduction), bias=False))
            downsamples.append(nn.Identity())
        self.resblocks = nn.ModuleList(resblocks)
        self.downsamples = nn.ModuleList(downsamples)
        # self.upsample = nn.Linear(int(d_model / reduction), d_model, bias=False)
        self.upsample = nn.Identity()
        self.ln_post = copy.deepcopy(clip_model.visual.ln_post)
        self.proj = copy.deepcopy(clip_model.visual.proj)
        # init
        named_apply(partial(init_weights, exclude=['resblocks', 'ln_post', 'proj', 'alpha']), self)
    def forward(self, memory=None):
        # x: LND in hooks
        # ladder side forward
        W = torch.sigmoid(self.alpha / self.T)
        for i, j in enumerate(self.block_ids):
            res_x = self.downsamples[i](self.hooks[i].input[0])
            if i == 0:
                x = self.resblocks[i](res_x)
            else:
                # Blend the (downsampled) backbone input into the running side output.
                x = W[i] * res_x + (1.0 - W[i]) * x
                x = self.resblocks[i](x)
        x = self.upsample(x)
        x = x.permute(1, 0, 2) # LND -> NLD
        # output transformation
        x = self.ln_post(x)
        if self.proj is not None:
            x = torch.matmul(x, self.proj)
        if memory is not None:
            # Final gate between the backbone's own output and the side-network output.
            x = W[0] * memory + (1.0 - W[0]) * x
        return x
    def prune(self, state_dcit, reduction):
        # v1: prune both rows and columns of every weight (kept for reference;
        # __init__ uses prune_v2).
        new_sd = {}
        for k, v in state_dcit.items():
            if "in_proj" in k:
                v_q, v_k, v_v = torch.chunk(v, 3, dim=0)
                new_v_q = ln_pruning(v_q, reduction=reduction, prune_col=True)
                new_v_k = ln_pruning(v_k, reduction=reduction, prune_col=True)
                new_v_v = ln_pruning(v_v, reduction=reduction, prune_col=True)
                new_v = torch.cat([new_v_q, new_v_k, new_v_v], dim=0).contiguous()
            else:
                new_v = ln_pruning(v, reduction=reduction, prune_col=True)
            new_sd[k] = new_v
        return new_sd
    def prune_v2(self, state_dcit, reduction):
        # v2: prune only the inner (hidden) dimensions so the block's external
        # feature dimension stays d_model, matching ResidualAttentionBlockCustom.
        new_sd = {}
        for k, v in state_dcit.items():
            if "in_proj" in k:
                v_q, v_k, v_v = torch.chunk(v, 3, dim=0)
                new_v_q = ln_pruning(v_q, reduction=reduction, prune_col=False)
                new_v_k = ln_pruning(v_k, reduction=reduction, prune_col=False)
                new_v_v = ln_pruning(v_v, reduction=reduction, prune_col=False)
                new_v = torch.cat([new_v_q, new_v_k, new_v_v], dim=0).contiguous()
            elif "out_proj.weight" in k or "mlp.c_proj.weight" in k:
                # only prune in_feature dimension
                new_v = ln_pruning(v.transpose(0, 1), reduction=reduction, prune_col=False)
                new_v = new_v.transpose(0, 1)
            elif "mlp.c_fc" in k:
                new_v = ln_pruning(v, reduction=reduction, prune_col=False)
            else:
                new_v = v
            new_sd[k] = new_v
        return new_sd
def ln_pruning(W, reduction=4.0, p=1, prune_col=False):
    """Magnitude-prune a 1-D or 2-D tensor, keeping the top ``1/reduction``
    entries (1-D) or rows (2-D) by Lp norm; the original ordering of the
    surviving elements is preserved.

    Args:
        W: tensor with ndim 1 or 2.
        reduction: keep ``size / reduction`` elements along each pruned axis.
        p: norm order used to rank rows/columns of a 2-D tensor.
        prune_col: for 2-D tensors, also prune columns after the rows.

    Raises:
        NotImplementedError: if ``W`` has more than two dimensions.
    """
    def _keep_top(mat, size, axis_norm):
        # Select the highest-norm slices, then restore their original order.
        keep = int(size / reduction)
        _, idx = torch.topk(axis_norm, k=keep, sorted=False)
        idx, _ = torch.sort(idx, descending=False)
        return mat[idx, ...]
    if W.ndim == 1:
        return _keep_top(W, W.numel(), torch.abs(W))
    if W.ndim == 2:
        n_rows, n_cols = W.shape
        pruned = _keep_top(W, n_rows, torch.norm(W, p=p, dim=1))
        if prune_col:
            # Prune columns by transposing and reusing the row logic.
            t = pruned.transpose(0, 1)
            pruned = _keep_top(t, n_cols, torch.norm(t, p=p, dim=1)).transpose(0, 1)
        return pruned
    raise NotImplementedError
class ResidualAttentionBlockCustom(nn.Module):
    """CLIP-style residual attention block with internally narrowed widths.
    The attention projections and the MLP hidden size are divided by
    ``reduction`` while the external feature dimension stays ``d_model``
    (used as the pruned side-network block in LadderSideAdapterPruning).
    """
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, reduction = 4):
        super().__init__()
        self.attn = MultiheadAttentionCustom(d_model, n_head, reduction=reduction)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4 // reduction)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4 // reduction, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
    def attention(self, x: torch.Tensor):
        # Move the mask to the input's dtype/device lazily, each call.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
    def forward(self, x: torch.Tensor):
        # Pre-LN residual attention followed by a pre-LN residual MLP.
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class MultiheadAttentionCustom(nn.Module):
    """Multi-head attention whose Q/K/V projections are narrowed to
    ``d_model // reduction`` while the output projection maps back to
    ``d_model`` (the pruned attention used by ResidualAttentionBlockCustom).
    """
    def __init__(self, d_model: int, n_head: int, bias = True, attn_mask: torch.Tensor = None, reduction = 4):
        super().__init__()
        self.embed_dim = d_model
        self.num_heads = n_head
        # Packed Q/K/V projection with narrowed output dimension.
        self.in_proj_weight = nn.Parameter(torch.empty((3 * d_model // reduction, d_model)))
        self.in_proj_bias = nn.Parameter(torch.empty(3 * d_model // reduction))
        self.out_proj = nn.Linear(d_model // reduction, d_model, bias=bias)
        self._reset_parameters()
    def _reset_parameters(self):
        nn.init.xavier_uniform_(self.in_proj_weight)
        # NOTE(review): if bias=False, out_proj.bias is None and this init
        # would fail — confirm bias is always True here.
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
    def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
                need_weights: bool = True, attn_mask: Optional[Tensor] = None,
                average_attn_weights: bool = True) -> Tuple[Tensor, Optional[Tensor]]:
        # by default, length-first input, e.g., [L, N, D]
        num_heads = self.num_heads
        # set up shape vars
        tgt_len, bsz, embed_dim = query.shape
        src_len, _, _ = key.shape
        # we assume q,k,v are the same
        q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias)
        # head_dim here is (d_model // reduction) // num_heads, not embed_dim // num_heads.
        head_dim = q.shape[-1] // num_heads
        #
        # reshape q, k, v for multihead attention and make em batch first
        #
        q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
        k = k.contiguous().view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
        v = v.contiguous().view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
        # update source sequence length after adjustments
        src_len = k.size(1)
        dropout_p = 0.0
        #
        # (deep breath) calculate attention and out projection
        #
        attn_output, attn_output_weights = _scaled_dot_product_attention(q, k, v, attn_mask, dropout_p)
        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, head_dim * num_heads)
        attn_output = F.linear(attn_output, self.out_proj.weight, self.out_proj.bias)
        if need_weights:
            # average attention weights over heads
            attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
            return attn_output, attn_output_weights.sum(dim=1) / num_heads
        else:
            return attn_output, None
# https://github.com/pytorch/pytorch/blob/71f889c7d265b9636b93ede9d651c0a9c4bee191/torch/nn/functional.py#L4809
def _scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, Tensor]:
r"""
Computes scaled dot product attention on query, key and value tensors, using
an optional attention mask if passed, and applying dropout if a probability
greater than 0.0 is specified.
Returns a tensor pair containing attended values and attention weights.
Args:
q, k, v: query, key and value tensors. See Shape section for shape details.
attn_mask: optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q: :math:`(B, Nt, E)` where B is batch size, Nt is the target sequence length,
and E is embedding dimension.
- key: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- value: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- attn_mask: either a 3D tensor of shape :math:`(B, Nt, Ns)` or a 2D tensor of
shape :math:`(Nt, Ns)`.
- Output: attention values have shape :math:`(B, Nt, E)`; attention weights
have shape :math:`(B, Nt, Ns)`
"""
B, Nt, E = q.shape
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
attn = torch.bmm(q, k.transpose(-2, -1))
if attn_mask is not None:
attn += attn_mask
attn = F.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = F.dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output, attn
if __name__ == "__main__":
    # Manual smoke test: build the custom pruned attention block, report its
    # parameter count, and run a dummy forward pass on random input.
    # W = torch.rand(4, 4)
    # W = torch.rand(4)
    # b = ln_pruning(W, reduction=2.0, p=1)
    # print(W)
    # print(b)
    m = MultiheadAttentionCustom(768, 12, reduction=4)
    m = ResidualAttentionBlockCustom(768, 12, reduction=4)
    num = 0
    for n, p in m.named_parameters():
        num += p.numel()
    print(num)
    x = torch.rand(16, 4, 768)
    # y = m(x, x, x)
    y = m(x)
    print(y[0].shape)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/str_adapter/__init__.py | strhub/models/str_adapter/__init__.py | # coding=utf-8 | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_alignment.py | strhub/models/abinet/model_alignment.py | import torch
import torch.nn as nn
from .model import Model
class BaseAlignment(Model):
    """Fuse language-branch and vision-branch features with a learned gate,
    then classify the fused features per position.
    """
    def __init__(self, dataset_max_length, null_label, num_classes, d_model=512, loss_weight=1.0):
        super().__init__(dataset_max_length, null_label)
        self.loss_weight = loss_weight
        self.w_att = nn.Linear(2 * d_model, d_model)
        self.cls = nn.Linear(d_model, num_classes)
    def forward(self, l_feature, v_feature):
        """
        Args:
            l_feature: (N, T, E) language features (T = length, E = model dim)
            v_feature: (N, T, E) vision features, same shape as l_feature
        """
        fused = torch.cat((l_feature, v_feature), dim=2)
        gate = torch.sigmoid(self.w_att(fused))
        # Element-wise gate interpolates between the two branches.
        aligned = gate * v_feature + (1 - gate) * l_feature
        logits = self.cls(aligned)  # (N, T, C)
        pt_lengths = self._get_length(logits)
        return {'logits': logits, 'pt_lengths': pt_lengths, 'loss_weight': self.loss_weight,
                'name': 'alignment'}
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/backbone.py | strhub/models/abinet/backbone.py | import torch.nn as nn
from torch.nn import TransformerEncoderLayer, TransformerEncoder
from .resnet import resnet45
from .transformer import PositionalEncoding
class ResTranformer(nn.Module):
    """ResNet-45 feature extractor followed by a Transformer encoder.
    Keeps the 2-D feature-map layout: (n, c, h, w) in and (n, c, h, w) out.
    """
    def __init__(self, d_model=512, nhead=8, d_inner=2048, dropout=0.1, activation='relu', backbone_ln=2):
        super().__init__()
        self.resnet = resnet45()
        # max_len matches the largest expected h*w of the ResNet feature map.
        self.pos_encoder = PositionalEncoding(d_model, max_len=8 * 32)
        encoder_layer = TransformerEncoderLayer(d_model=d_model, nhead=nhead,
                                                dim_feedforward=d_inner, dropout=dropout, activation=activation)
        self.transformer = TransformerEncoder(encoder_layer, backbone_ln)
    def forward(self, images):
        feature = self.resnet(images)
        n, c, h, w = feature.shape
        # Flatten the spatial grid to a sequence: (n, c, h, w) -> (h*w, n, c).
        feature = feature.view(n, c, -1).permute(2, 0, 1)
        feature = self.pos_encoder(feature)
        feature = self.transformer(feature)
        # Restore the spatial layout: (h*w, n, c) -> (n, c, h, w).
        feature = feature.permute(1, 2, 0).view(n, c, h, w)
        return feature
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model.py | strhub/models/abinet/model.py | import torch
import torch.nn as nn
class Model(nn.Module):
    """Common base for ABINet sub-models: greedy length decoding and mask helpers."""
    def __init__(self, dataset_max_length: int, null_label: int):
        super().__init__()
        # +1 reserves room for the stop (null) token.
        self.max_length = dataset_max_length + 1
        self.null_label = null_label
    def _get_length(self, logit, dim=-1):
        """Greedy decode: length = index of the first null token + 1; falls
        back to the full sequence length when no null is predicted."""
        is_null = logit.argmax(dim=-1) == self.null_label
        has_null = is_null.any(dim)
        # cumsum==1 & is_null isolates the first null position per sequence.
        first_null = ((is_null.cumsum(dim) == 1) & is_null).max(dim)[1]
        lengths = first_null + 1  # include the end token itself
        full = lengths.new_tensor(logit.shape[1], device=lengths.device)
        return torch.where(has_null, lengths, full)
    @staticmethod
    def _get_padding_mask(length, max_length):
        # True at padded (out-of-length) positions.
        positions = torch.arange(0, max_length, device=length.device).unsqueeze(0)
        return positions >= length.unsqueeze(-1)
    @staticmethod
    def _get_location_mask(sz, device=None):
        # Additive mask with -inf on the diagonal (block attention to self).
        eye = torch.eye(sz, device=device)
        return eye.float().masked_fill(eye == 1, float('-inf'))
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/resnet.py | strhub/models/abinet/resnet.py | import math
from typing import Optional, Callable
import torch.nn as nn
from torchvision.models import resnet
class BasicBlock(resnet.BasicBlock):
    """torchvision BasicBlock variant used by ResNet-45.

    Differs from the parent only in its convolutions: conv1 becomes a 1x1
    (stride 1) conv and conv2 a 3x3 conv that carries the block's stride.
    """

    def __init__(self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None,
                 groups: int = 1, base_width: int = 64, dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__(inplanes, planes, stride, downsample, groups, base_width, dilation, norm_layer)
        # Replace the parent's convs: 1x1 first, then 3x3 with the stride.
        self.conv1 = resnet.conv1x1(inplanes, planes)
        self.conv2 = resnet.conv3x3(planes, planes, stride)
class ResNet(nn.Module):
    """ResNet-45 trunk with a 32-channel stem.

    Five residual stages with strides 2, 1, 2, 1, 1 (stem stride 1), so the
    spatial resolution is reduced by 4 in each dimension overall.
    """

    def __init__(self, block, layers):
        super().__init__()
        self.inplanes = 32
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 32, layers[0], stride=2)
        self.layer2 = self._make_layer(block, 64, layers[1], stride=1)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=1)
        self.layer5 = self._make_layer(block, 512, layers[4], stride=1)
        # He-style initialization for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first block may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the shortcut matches shape/channels.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the stem and the five residual stages; returns a (N, 512, H/4, W/4) map."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        return x
def resnet45():
    """Build the ResNet-45 backbone used by ABINet (3+4+6+6+3 basic blocks)."""
    stage_depths = [3, 4, 6, 6, 3]
    return ResNet(BasicBlock, stage_depths)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/system.py | strhub/models/abinet/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
from typing import Any, Tuple, List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.optim import AdamW
from torch.optim.lr_scheduler import OneCycleLR
from pytorch_lightning.utilities.types import STEP_OUTPUT
from timm.optim.optim_factory import param_groups_weight_decay
from strhub.models.base import CrossEntropySystem
from strhub.models.utils import init_weights
from .model_abinet_iter import ABINetIterModel as Model
log = logging.getLogger(__name__)
class ABINet(CrossEntropySystem):
    """PyTorch-Lightning system wrapping the iterative ABINet model.

    Training schedule (mirrors the original work): the vision and language
    submodels are pretrained independently for the first 8/18 of the total
    steps, then the alignment model is re-initialized and the full iterative
    model is trained jointly.  With ``lm_only=True`` only the language model
    is trained (vision and alignment are frozen).
    """

    def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
                 iter_size: int, d_model: int, nhead: int, d_inner: int, dropout: float, activation: str,
                 v_loss_weight: float, v_attention: str, v_attention_mode: str, v_backbone: str, v_num_layers: int,
                 l_loss_weight: float, l_num_layers: int, l_detach: bool, l_use_self_attn: bool,
                 l_lr: float, a_loss_weight: float, lm_only: bool = False, **kwargs) -> None:
        super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
        self.scheduler = None
        self.save_hyperparameters()
        self.max_label_length = max_label_length
        self.num_classes = len(self.tokenizer) - 2  # We don't predict <bos> nor <pad>
        self.model = Model(max_label_length, self.eos_id, self.num_classes, iter_size, d_model, nhead, d_inner,
                           dropout, activation, v_loss_weight, v_attention, v_attention_mode, v_backbone, v_num_layers,
                           l_loss_weight, l_num_layers, l_detach, l_use_self_attn, a_loss_weight)
        self.model.apply(init_weights)
        # FIXME: doesn't support resumption from checkpoint yet
        self._reset_alignment = True
        self._reset_optimizers = True
        self.l_lr = l_lr
        self.lm_only = lm_only
        # Train LM only. Freeze other submodels.
        if lm_only:
            self.l_lr = lr  # for tuning
            self.model.vision.requires_grad_(False)
            self.model.alignment.requires_grad_(False)

    @property
    def _pretraining(self):
        """True while still in the independent-pretraining phase of the schedule."""
        # In the original work, VM was pretrained for 8 epochs while full model was trained for an additional 10 epochs.
        total_steps = self.trainer.estimated_stepping_batches * self.trainer.accumulate_grad_batches
        return self.global_step < (8 / (8 + 10)) * total_steps

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names exempted from weight decay (the LM's input projection).
        return {'model.language.proj.weight'}

    def _add_weight_decay(self, model: nn.Module, skip_list=()):
        """Build optimizer param groups for `model`, applying weight decay unless skipped."""
        if self.weight_decay:
            return param_groups_weight_decay(model, self.weight_decay, skip_list)
        else:
            return [{'params': model.parameters()}]

    def configure_optimizers(self):
        """One AdamW optimizer + OneCycle schedule; the LM gets its own learning rate."""
        agb = self.trainer.accumulate_grad_batches
        # Linear scaling so that the effective learning rate is constant regardless of the number of GPUs used with DDP.
        lr_scale = agb * math.sqrt(self.trainer.num_devices) * self.batch_size / 256.
        lr = lr_scale * self.lr
        l_lr = lr_scale * self.l_lr
        params = []
        params.extend(self._add_weight_decay(self.model.vision))
        params.extend(self._add_weight_decay(self.model.alignment))
        # We use a different learning rate for the LM.
        for p in self._add_weight_decay(self.model.language, ('proj.weight',)):
            p['lr'] = l_lr
            params.append(p)
        max_lr = [p.get('lr', lr) for p in params]
        optim = AdamW(params, lr)
        self.scheduler = OneCycleLR(optim, max_lr, self.trainer.estimated_stepping_batches,
                                    pct_start=self.warmup_pct, cycle_momentum=False)
        return {'optimizer': optim, 'lr_scheduler': {'scheduler': self.scheduler, 'interval': 'step'}}

    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        """Return the alignment logits, truncated to max_length + 1 (incl. <eos>)."""
        max_length = self.max_label_length if max_length is None else min(max_length, self.max_label_length)
        logits = self.model.forward(images)[0]['logits']
        return logits[:, :max_length + 1]  # truncate

    def calc_loss(self, targets, *res_lists) -> Tensor:
        """Weighted sum of cross-entropy losses over submodel results.

        Each item of `res_lists` is a result dict or a list of result dicts
        (one per refinement iteration); per-iteration losses are averaged
        before being scaled by that submodel's 'loss_weight'.
        """
        total_loss = 0
        for res_list in res_lists:
            loss = 0
            if isinstance(res_list, dict):
                res_list = [res_list]
            for res in res_list:
                logits = res['logits'].flatten(end_dim=1)
                loss += F.cross_entropy(logits, targets.flatten(), ignore_index=self.pad_id)
            loss /= len(res_list)
            self.log('loss_' + res_list[0]['name'], loss)
            total_loss += res_list[0]['loss_weight'] * loss
        return total_loss

    def on_train_batch_start(self, batch: Any, batch_idx: int) -> None:
        # On the pretraining -> joint-training transition, unify base LRs once.
        if not self._pretraining and self._reset_optimizers:
            log.info('Pretraining ends. Updating base LRs.')
            self._reset_optimizers = False
            # Make base_lr the same for all groups
            base_lr = self.scheduler.base_lrs[0]  # base_lr of group 0 - VM
            self.scheduler.base_lrs = [base_lr] * len(self.scheduler.base_lrs)

    def _prepare_inputs_and_targets(self, labels):
        """Encode `labels` into one-hot LM inputs, per-sample lengths, and CE targets."""
        # Use dummy label to ensure sequence length is constant.
        dummy = ['0' * self.max_label_length]
        targets = self.tokenizer.encode(dummy + list(labels), self.device)[1:]
        targets = targets[:, 1:]  # remove <bos>. Unused here.
        # Inputs are padded with eos_id
        inputs = torch.where(targets == self.pad_id, self.eos_id, targets)
        inputs = F.one_hot(inputs, self.num_classes).float()
        lengths = torch.as_tensor(list(map(len, labels)), device=self.device) + 1  # +1 for eos
        return inputs, lengths, targets

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        """Dispatch to LM-only, pretraining, or joint training depending on the phase."""
        images, labels = batch
        inputs, lengths, targets = self._prepare_inputs_and_targets(labels)
        if self.lm_only:
            l_res = self.model.language(inputs, lengths)
            loss = self.calc_loss(targets, l_res)
        # Pretrain submodels independently first
        elif self._pretraining:
            # Vision
            v_res = self.model.vision(images)
            # Language
            l_res = self.model.language(inputs, lengths)
            # We also train the alignment model to 'satisfy' DDP requirements (all parameters should be used).
            # We'll reset its parameters prior to joint training.
            a_res = self.model.alignment(l_res['feature'].detach(), v_res['feature'].detach())
            loss = self.calc_loss(targets, v_res, l_res, a_res)
        else:
            # Reset alignment model's parameters once prior to full model training.
            if self._reset_alignment:
                log.info('Pretraining ends. Resetting alignment model.')
                self._reset_alignment = False
                self.model.alignment.apply(init_weights)
            all_a_res, all_l_res, v_res = self.model.forward(images)
            loss = self.calc_loss(targets, v_res, all_l_res, all_a_res)
        self.log('loss', loss)
        return loss

    def forward_logits_loss(self, images: Tensor, labels: List[str]) -> Tuple[Tensor, Tensor, int]:
        """Validation/test path; in lm_only mode the LM is evaluated directly on labels."""
        if self.lm_only:
            inputs, lengths, targets = self._prepare_inputs_and_targets(labels)
            l_res = self.model.language(inputs, lengths)
            loss = self.calc_loss(targets, l_res)
            loss_numel = (targets != self.pad_id).sum()
            return l_res['logits'], loss, loss_numel
        else:
            return super().forward_logits_loss(images, labels)
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_language.py | strhub/models/abinet/model_language.py | import torch.nn as nn
from torch.nn import TransformerDecoder
from .model import Model
from .transformer import PositionalEncoding, TransformerDecoderLayer
class BCNLanguage(Model):
    """Bidirectional cloze network: a transformer-decoder language model that
    refines per-position character probabilities.

    Positional queries attend over the projected token probabilities; a
    diagonal -inf location mask prevents each position from seeing itself.
    """

    def __init__(self, dataset_max_length, null_label, num_classes, d_model=512, nhead=8, d_inner=2048, dropout=0.1,
                 activation='relu', num_layers=4, detach=True, use_self_attn=False, loss_weight=1.0,
                 global_debug=False):
        super().__init__(dataset_max_length, null_label)
        self.detach = detach
        self.loss_weight = loss_weight
        # Project class probabilities into the model dimension (no bias).
        self.proj = nn.Linear(num_classes, d_model, False)
        self.token_encoder = PositionalEncoding(d_model, max_len=self.max_length)
        self.pos_encoder = PositionalEncoding(d_model, dropout=0, max_len=self.max_length)
        layer = TransformerDecoderLayer(d_model, nhead, d_inner, dropout,
                                        activation, self_attn=use_self_attn, debug=global_debug)
        self.model = TransformerDecoder(layer, num_layers)
        self.cls = nn.Linear(d_model, num_classes)

    def forward(self, tokens, lengths):
        """
        Args:
            tokens: (N, T, C) per-position class probabilities.
            lengths: (N,) valid sequence lengths.
        """
        if self.detach:
            # Cut the gradient so the LM is trained independently of the VM.
            tokens = tokens.detach()
        embed = self.token_encoder(self.proj(tokens).permute(1, 0, 2))  # (T, N, E)
        padding_mask = self._get_padding_mask(lengths, self.max_length)
        # Queries are pure positional encodings (applied to zeros).
        query = self.pos_encoder(embed.new_zeros(*embed.shape))
        location_mask = self._get_location_mask(self.max_length, tokens.device)
        output = self.model(query, embed,
                            tgt_key_padding_mask=padding_mask,
                            memory_mask=location_mask,
                            memory_key_padding_mask=padding_mask)  # (T, N, E)
        output = output.permute(1, 0, 2)  # (N, T, E)
        logits = self.cls(output)  # (N, T, C)
        return {'feature': output, 'logits': logits,
                'pt_lengths': self._get_length(logits),
                'loss_weight': self.loss_weight, 'name': 'language'}
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_abinet_iter.py | strhub/models/abinet/model_abinet_iter.py | import torch
from torch import nn
from .model_alignment import BaseAlignment
from .model_language import BCNLanguage
from .model_vision import BaseVision
class ABINetIterModel(nn.Module):
    """Full ABINet: vision model + iteratively applied language/alignment models."""

    def __init__(self, dataset_max_length, null_label, num_classes, iter_size=1,
                 d_model=512, nhead=8, d_inner=2048, dropout=0.1, activation='relu',
                 v_loss_weight=1., v_attention='position', v_attention_mode='nearest',
                 v_backbone='transformer', v_num_layers=2,
                 l_loss_weight=1., l_num_layers=4, l_detach=True, l_use_self_attn=False,
                 a_loss_weight=1.):
        super().__init__()
        # Number of language/alignment refinement iterations.
        self.iter_size = iter_size
        self.vision = BaseVision(dataset_max_length, null_label, num_classes, v_attention, v_attention_mode,
                                 v_loss_weight, d_model, nhead, d_inner, dropout, activation, v_backbone, v_num_layers)
        self.language = BCNLanguage(dataset_max_length, null_label, num_classes, d_model, nhead, d_inner, dropout,
                                    activation, l_num_layers, l_detach, l_use_self_attn, l_loss_weight)
        self.alignment = BaseAlignment(dataset_max_length, null_label, num_classes, d_model, a_loss_weight)

    def forward(self, images):
        """Run the vision model once, then iteratively refine with LM + alignment.

        Returns (alignment, language, vision) results: all iterations' results
        in training mode, only the last iteration's in eval mode.
        """
        v_res = self.vision(images)
        # Seed the refinement loop with the vision prediction.
        a_res = v_res
        all_l_res, all_a_res = [], []
        for _ in range(self.iter_size):
            # Feed the previous prediction to the LM as class probabilities.
            tokens = torch.softmax(a_res['logits'], dim=-1)
            lengths = a_res['pt_lengths']
            # In-place clamp: LM requires at least 2 and at most max_length.
            lengths.clamp_(2, self.language.max_length)  # TODO: move to language model
            l_res = self.language(tokens, lengths)
            all_l_res.append(l_res)
            # Fuse refined language features with the (fixed) vision features.
            a_res = self.alignment(l_res['feature'], v_res['feature'])
            all_a_res.append(a_res)
        if self.training:
            # Every iteration is supervised during training.
            return all_a_res, all_l_res, v_res
        else:
            return a_res, all_l_res[-1], v_res
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/__init__.py | strhub/models/abinet/__init__.py | r"""
Fang, Shancheng, Xie, Hongtao, Wang, Yuxin, Mao, Zhendong, and Zhang, Yongdong.
"Read Like Humans: Autonomous, Bidirectional and Iterative Language Modeling for Scene Text Recognition."
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 7098-7107).2021.
https://arxiv.org/abs/2103.06495
All source files, except `system.py`, are based on the implementation listed below,
and hence are released under the license of the original.
Source: https://github.com/FangShancheng/ABINet
License: 2-clause BSD License (see included LICENSE file)
"""
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/attention.py | strhub/models/abinet/attention.py | import torch
import torch.nn as nn
from .transformer import PositionalEncoding
class Attention(nn.Module):
    """Character-position attention head (ABINet 'attention' variant).

    Aggregates a CNN feature map whose spatial size is H*W = n_feature
    (viewed as 8x32 in the returned attention maps) into `max_length`
    per-character feature vectors.
    """

    def __init__(self, in_channels=512, max_length=25, n_feature=256):
        super().__init__()
        self.max_length = max_length
        self.f0_embedding = nn.Embedding(max_length, in_channels)
        self.w0 = nn.Linear(max_length, n_feature)
        self.wv = nn.Linear(in_channels, in_channels)
        self.we = nn.Linear(in_channels, max_length)
        self.active = nn.Tanh()
        self.softmax = nn.Softmax(dim=2)

    def forward(self, enc_output):
        # (N, C, H, W) -> (N, H*W, C)
        keys = enc_output.permute(0, 2, 3, 1).flatten(1, 2)
        batch = keys.size(0)
        order = torch.arange(self.max_length, dtype=torch.long, device=keys.device)
        order = order.unsqueeze(0).expand(batch, -1)  # (S,) -> (B, S)
        order_embed = self.f0_embedding(order)  # (B, T, C)
        proj = self.w0(order_embed.permute(0, 2, 1))  # (B, C, n_feature)
        mixed = self.active(proj.permute(0, 2, 1) + self.wv(keys))  # (B, n_feature, C)
        scores = self.softmax(self.we(mixed).permute(0, 2, 1))  # (B, T, n_feature)
        g_output = torch.bmm(scores, keys)  # (B, T, C)
        return g_output, scores.view(*scores.shape[:2], 8, 32)
def encoder_layer(in_c, out_c, k=3, s=2, p=1):
    """Conv -> BatchNorm -> ReLU block (stride 2 downsamples by default)."""
    return nn.Sequential(
        nn.Conv2d(in_c, out_c, k, s, p),
        nn.BatchNorm2d(out_c),
        nn.ReLU(True),
    )
def decoder_layer(in_c, out_c, k=3, s=1, p=1, mode='nearest', scale_factor=None, size=None):
    """Upsample -> Conv -> BatchNorm -> ReLU block.

    align_corners is only valid for interpolating modes, so it is left None
    for 'nearest' and set to True otherwise.
    """
    align_corners = None if mode == 'nearest' else True
    return nn.Sequential(
        nn.Upsample(size=size, scale_factor=scale_factor,
                    mode=mode, align_corners=align_corners),
        nn.Conv2d(in_c, out_c, k, s, p),
        nn.BatchNorm2d(out_c),
        nn.ReLU(True),
    )
class PositionAttention(nn.Module):
    """Positional attention over a CNN feature map.

    Keys are produced by a small U-Net over the feature map; queries are
    learned positional encodings, one per output character slot.
    """

    def __init__(self, max_length, in_channels=512, num_channels=64,
                 h=8, w=32, mode='nearest', **kwargs):
        super().__init__()
        self.max_length = max_length
        # Encoder path: four conv blocks progressively downsampling the key map.
        self.k_encoder = nn.Sequential(
            encoder_layer(in_channels, num_channels, s=(1, 2)),
            encoder_layer(num_channels, num_channels, s=(2, 2)),
            encoder_layer(num_channels, num_channels, s=(2, 2)),
            encoder_layer(num_channels, num_channels, s=(2, 2))
        )
        # Decoder path: upsample back to (h, w), with skip connections added in forward().
        self.k_decoder = nn.Sequential(
            decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
            decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
            decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
            decoder_layer(num_channels, in_channels, size=(h, w), mode=mode)
        )
        self.pos_encoder = PositionalEncoding(in_channels, dropout=0., max_len=max_length)
        self.project = nn.Linear(in_channels, in_channels)

    def forward(self, x):
        """x: (N, E, H, W) -> (attn_vecs (N, T, E), attn_scores (N, T, H, W))."""
        N, E, H, W = x.size()
        k, v = x, x  # (N, E, H, W)
        # calculate key vector
        features = []
        for i in range(0, len(self.k_encoder)):
            k = self.k_encoder[i](k)
            features.append(k)
        for i in range(0, len(self.k_decoder) - 1):
            k = self.k_decoder[i](k)
            # U-Net skip connection from the matching encoder stage.
            k = k + features[len(self.k_decoder) - 2 - i]
        k = self.k_decoder[-1](k)
        # calculate query vector
        # TODO q=f(q,k)
        # Queries are positional encodings applied to zeros (pure position info).
        zeros = x.new_zeros((self.max_length, N, E))  # (T, N, E)
        q = self.pos_encoder(zeros)  # (T, N, E)
        q = q.permute(1, 0, 2)  # (N, T, E)
        q = self.project(q)  # (N, T, E)
        # calculate attention (scaled dot-product over the H*W positions)
        attn_scores = torch.bmm(q, k.flatten(2, 3))  # (N, T, (H*W))
        attn_scores = attn_scores / (E ** 0.5)
        attn_scores = torch.softmax(attn_scores, dim=-1)
        v = v.permute(0, 2, 3, 1).view(N, -1, E)  # (N, (H*W), E)
        attn_vecs = torch.bmm(attn_scores, v)  # (N, T, E)
        return attn_vecs, attn_scores.view(N, -1, H, W)
class PositionAttentionForSequence(nn.Module):
    """Positional attention over a token sequence with a leading CLS token
    (e.g. ViT output); the non-CLS tokens are treated as a square patch grid."""

    def __init__(self, max_length, in_channels=512, num_channels=64,
                 h=14, w=14, mode='nearest', **kwargs):
        """this module deals with sequence input"""
        super().__init__()
        self.max_length = max_length
        # Stride-1 conv stack (no downsampling) used to refine the key map.
        self.k_encoder = nn.Sequential(
            encoder_layer(in_channels, num_channels, s=(1, 1)),
            encoder_layer(num_channels, num_channels, s=(1, 1)),
            encoder_layer(num_channels, num_channels, s=(1, 1)),
            encoder_layer(num_channels, num_channels, s=(1, 1))
        )
        # scale_factor=1 keeps the resolution; only channels change in the last layer.
        self.k_decoder = nn.Sequential(
            decoder_layer(num_channels, num_channels, scale_factor=1, mode=mode),
            decoder_layer(num_channels, num_channels, scale_factor=1, mode=mode),
            decoder_layer(num_channels, num_channels, scale_factor=1, mode=mode),
            decoder_layer(num_channels, in_channels, size=(h, w), mode=mode)
        )
        self.pos_encoder = PositionalEncoding(in_channels, dropout=0., max_len=max_length)
        self.project = nn.Linear(in_channels, in_channels)

    def forward(self, x):
        """
        Args:
            x [N, L, E]: token sequence; x[:, 0] is the CLS token and the
                remaining L-1 tokens are assumed to form a square patch grid.
        """
        N, L, E = x.size()
        v = x  # (N, L, E)
        # Assumes L-1 is a perfect square — TODO confirm for all callers.
        H, W = int((L - 1) ** 0.5), int((L - 1) ** 0.5)
        cls_token = x[:, 0:1, :]
        # [N, C, H, W]
        k = x[:, 1:, :].reshape(N, H, W, -1).permute(0, 3, 1, 2).contiguous()
        # calculate key vector
        features = []
        for i in range(0, len(self.k_encoder)):
            k = self.k_encoder[i](k)
            features.append(k)
        for i in range(0, len(self.k_decoder) - 1):
            k = self.k_decoder[i](k)
            # Skip connection from the matching encoder stage.
            k = k + features[len(self.k_decoder) - 2 - i]
        k = self.k_decoder[-1](k)
        k = k.flatten(2, 3).transpose(-1, -2)  # (N, (H*W), E)
        # Re-attach the untouched CLS token so keys align with the values.
        k = torch.cat([cls_token, k], dim=1)  # (N, L, E)
        # calculate query vector (pure positional encodings on zeros)
        zeros = x.new_zeros((self.max_length, N, E))  # (T, N, E)
        q = self.pos_encoder(zeros)  # (T, N, E)
        q = q.permute(1, 0, 2)  # (N, T, E)
        q = self.project(q)  # (N, T, E)
        # calculate attention (scaled dot-product over all L tokens)
        attn_scores = torch.bmm(q, k.transpose(-1, -2))  # (N, T, L)
        attn_scores = attn_scores / (E ** 0.5)
        attn_scores = torch.softmax(attn_scores, dim=-1)
        attn_vecs = torch.bmm(attn_scores, v)  # (N, T, E)
        return attn_vecs, attn_scores
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/model_vision.py | strhub/models/abinet/model_vision.py | from torch import nn
from .attention import PositionAttention, Attention
from .backbone import ResTranformer
from .model import Model
from .resnet import resnet45
class BaseVision(Model):
    """Vision submodel: backbone + character-position attention + classifier."""

    def __init__(self, dataset_max_length, null_label, num_classes,
                 attention='position', attention_mode='nearest', loss_weight=1.0,
                 d_model=512, nhead=8, d_inner=2048, dropout=0.1, activation='relu',
                 backbone='transformer', backbone_ln=2):
        super().__init__(dataset_max_length, null_label)
        self.loss_weight = loss_weight
        self.out_channels = d_model
        # Backbone: ResNet-45 optionally followed by a transformer encoder.
        if backbone == 'transformer':
            self.backbone = ResTranformer(d_model, nhead, d_inner, dropout, activation, backbone_ln)
        else:
            self.backbone = resnet45()
        # Attention head that pools the feature map into per-character vectors.
        if attention == 'position':
            self.attention = PositionAttention(max_length=self.max_length, mode=attention_mode)
        elif attention == 'attention':
            self.attention = Attention(max_length=self.max_length, n_feature=8 * 32)
        else:
            raise ValueError(f'invalid attention: {attention}')
        self.cls = nn.Linear(self.out_channels, num_classes)

    def forward(self, images):
        """Images -> per-character features, logits, lengths and attention maps."""
        features = self.backbone(images)  # (N, E, H, W)
        attn_vecs, attn_scores = self.attention(features)  # (N, T, E), (N, T, H, W)
        logits = self.cls(attn_vecs)  # (N, T, C)
        return {'feature': attn_vecs, 'logits': logits,
                'pt_lengths': self._get_length(logits),
                'attn_scores': attn_scores,
                'loss_weight': self.loss_weight, 'name': 'vision'}
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/abinet/transformer.py | strhub/models/abinet/transformer.py | import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.transformer import _get_activation_fn
class TransformerDecoderLayer(nn.Module):
    r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
    This standard decoder layer is based on the paper "Attention Is All You Need".
    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
    in a different way during application.

    This variant can disable self-attention (self_attn=False), add a second
    "siamese" cross-attention branch over a second memory, and stash the
    attention maps on the module when debug=True.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of intermediate layer, relu or gelu (default=relu).

    Examples::
        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = decoder_layer(tgt, memory)
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", self_attn=True, siamese=False, debug=False):
        super().__init__()
        self.has_self_attn, self.siamese = self_attn, siamese
        # debug=True records attention maps as self.attn / self.attn2 / self.attn3.
        self.debug = debug
        if self.has_self_attn:
            self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
            self.norm1 = nn.LayerNorm(d_model)
            self.dropout1 = nn.Dropout(dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        if self.siamese:
            # Second cross-attention branch for the siamese (two-memory) variant.
            self.multihead_attn2 = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        # Backward compatibility: older pickled layers lack 'activation'.
        if 'activation' not in state:
            state['activation'] = F.relu
        super().__setstate__(state)

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None,
                memory2=None, memory_mask2=None, memory_key_padding_mask2=None):
        # type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor
        r"""Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        # Optional self-attention sublayer (residual + post-norm).
        if self.has_self_attn:
            tgt2, attn = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
                                        key_padding_mask=tgt_key_padding_mask)
            tgt = tgt + self.dropout1(tgt2)
            tgt = self.norm1(tgt)
            if self.debug: self.attn = attn
        # Cross-attention over the (first) memory.
        tgt2, attn2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                                          key_padding_mask=memory_key_padding_mask)
        if self.debug: self.attn2 = attn2
        if self.siamese:
            # Second memory stream; its residual is added BEFORE the first
            # stream's residual and norm2 (both share dropout2).
            tgt3, attn3 = self.multihead_attn2(tgt, memory2, memory2, attn_mask=memory_mask2,
                                               key_padding_mask=memory_key_padding_mask2)
            tgt = tgt + self.dropout2(tgt3)
            if self.debug: self.attn3 = attn3
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # Position-wise feedforward sublayer (residual + post-norm).
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt
class PositionalEncoding(nn.Module):
    r"""Sinusoidal positional encoding (Vaswani et al., 2017).

    Adds fixed position-dependent sine/cosine features to the input and then
    applies dropout:

    .. math::
        \text{PosEncoder}(pos, 2i)   = \sin(pos / 10000^{2i/d\_model})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos / 10000^{2i/d\_model})

    Args:
        d_model: the embed dim (required).
        dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=5000).

    Examples:
        >>> pos_encoder = PositionalEncoding(d_model)
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        pe = pe.unsqueeze(0).transpose(0, 1)  # (max_len, 1, d_model)
        # Registered as a frozen Parameter (not a buffer) to work around
        # checkpoint-loading issues:
        # https://github.com/Lightning-AI/lightning/discussions/14377
        # https://github.com/pytorch/pytorch/issues/68407
        self.register_parameter('pe', nn.Parameter(pe, requires_grad=False))

    def forward(self, x):
        r"""Add the positional encoding to x.

        Shape:
            x: [sequence length, batch size, embed dim]
            output: [sequence length, batch size, embed dim]

        Examples:
            >>> output = pos_encoder(x)
        """
        return self.dropout(x + self.pe[:x.size(0), :])
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/parseq/system.py | strhub/models/parseq/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from functools import partial
from itertools import permutations
from typing import Sequence, Any, Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from pytorch_lightning.utilities.types import STEP_OUTPUT
from timm.models.helpers import named_apply
from strhub.models.base import CrossEntropySystem
from strhub.models.utils import init_weights
from strhub.models.vitstr.model import load_pretrained_weight
from .modules import DecoderLayer, Decoder, Encoder, TokenEmbedding
class PARSeq(CrossEntropySystem):
def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
img_size: Sequence[int], patch_size: Sequence[int], embed_dim: int,
enc_num_heads: int, enc_mlp_ratio: int, enc_depth: int,
dec_num_heads: int, dec_mlp_ratio: int, dec_depth: int,
perm_num: int, perm_forward: bool, perm_mirrored: bool,
decode_ar: bool, refine_iters: int, dropout: float, **kwargs: Any) -> None:
super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
self.save_hyperparameters()
self.max_label_length = max_label_length
self.decode_ar = decode_ar
self.refine_iters = refine_iters
self.encoder = Encoder(img_size, patch_size, embed_dim=embed_dim, depth=enc_depth, num_heads=enc_num_heads,
mlp_ratio=enc_mlp_ratio)
# load pretrained weights for encoder
if embed_dim == 768 and "imagenet21k_pretrained" in kwargs.keys():
load_pretrained_weight(self.encoder, kwargs["imagenet21k_pretrained"])
if embed_dim > 512:
self.proj = nn.Linear(embed_dim, 512)
embed_dim = 512
else:
self.proj = nn.Identity()
decoder_layer = DecoderLayer(embed_dim, dec_num_heads, embed_dim * dec_mlp_ratio, dropout)
self.decoder = Decoder(decoder_layer, num_layers=dec_depth, norm=nn.LayerNorm(embed_dim))
# Perm/attn mask stuff
self.rng = np.random.default_rng()
self.max_gen_perms = perm_num // 2 if perm_mirrored else perm_num
self.perm_forward = perm_forward
self.perm_mirrored = perm_mirrored
# We don't predict <bos> nor <pad>
self.head = nn.Linear(embed_dim, len(self.tokenizer) - 2)
self.text_embed = TokenEmbedding(len(self.tokenizer), embed_dim)
# +1 for <eos>
self.pos_queries = nn.Parameter(torch.Tensor(1, max_label_length + 1, embed_dim))
self.dropout = nn.Dropout(p=dropout)
# Encoder has its own init.
named_apply(partial(init_weights, exclude=['encoder']), self)
nn.init.trunc_normal_(self.pos_queries, std=.02)
@torch.jit.ignore
def no_weight_decay(self):
param_names = {'text_embed.embedding.weight', 'pos_queries'}
enc_param_names = {'encoder.' + n for n in self.encoder.no_weight_decay()}
return param_names.union(enc_param_names)
def encode(self, img: torch.Tensor):
return self.proj(self.encoder(img))
    def decode(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[Tensor] = None,
               tgt_padding_mask: Optional[Tensor] = None, tgt_query: Optional[Tensor] = None,
               tgt_query_mask: Optional[Tensor] = None):
        """Run the two-stream decoder over the target context.

        Args:
            tgt: token ids, shape (N, L); position 0 is <bos>.
            memory: encoder output tokens (keys/values for cross-attention).
            tgt_mask: attention mask for the content stream.
            tgt_padding_mask: key padding mask over `tgt`.
            tgt_query: query-stream input; defaults to the learned positional queries.
            tgt_query_mask: attention mask for the query stream.
        """
        N, L = tgt.shape
        # <bos> stands for the null context. We only supply position information for characters after <bos>.
        null_ctx = self.text_embed(tgt[:, :1])
        # Content embeddings: positional query + character embedding for every position after <bos>.
        tgt_emb = self.pos_queries[:, :L - 1] + self.text_embed(tgt[:, 1:])
        tgt_emb = self.dropout(torch.cat([null_ctx, tgt_emb], dim=1))
        if tgt_query is None:
            # Default query stream: learned positional queries for all L positions.
            tgt_query = self.pos_queries[:, :L].expand(N, -1, -1)
        tgt_query = self.dropout(tgt_query)
        return self.decoder(tgt_query, tgt_emb, memory, tgt_query_mask, tgt_mask, tgt_padding_mask)
    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        """Decode character logits from images.

        Args:
            images: input image batch.
            max_length: optional cap on the decoded length; `None` (test-time)
                uses `self.max_label_length` and enables batch early exit.

        Returns:
            Logits over the charset, one step per output position (+1 for <eos>).
        """
        testing = max_length is None
        max_length = self.max_label_length if max_length is None else min(max_length, self.max_label_length)
        bs = images.shape[0]
        # +1 for <eos> at end of sequence.
        num_steps = max_length + 1
        memory = self.encode(images)

        # Query positions up to `num_steps`
        pos_queries = self.pos_queries[:, :num_steps].expand(bs, -1, -1)

        # Special case for the forward permutation. Faster than using `generate_attn_masks()`
        tgt_mask = query_mask = torch.triu(torch.full((num_steps, num_steps), float('-inf'), device=self._device), 1)

        if self.decode_ar:
            # Autoregressive greedy decoding, one position at a time.
            tgt_in = torch.full((bs, num_steps), self.pad_id, dtype=torch.long, device=self._device)
            tgt_in[:, 0] = self.bos_id

            logits = []
            for i in range(num_steps):
                j = i + 1  # next token index
                # Efficient decoding:
                # Input the context up to the ith token. We use only one query (at position = i) at a time.
                # This works because of the lookahead masking effect of the canonical (forward) AR context.
                # Past tokens have no access to future tokens, hence are fixed once computed.
                tgt_out = self.decode(tgt_in[:, :j], memory, tgt_mask[:j, :j], tgt_query=pos_queries[:, i:j],
                                      tgt_query_mask=query_mask[i:j, :j])
                # the next token probability is in the output's ith token position
                p_i = self.head(tgt_out)
                logits.append(p_i)
                if j < num_steps:
                    # greedy decode. add the next token index to the target input
                    tgt_in[:, j] = p_i.squeeze().argmax(-1)
                    # Efficient batch decoding: If all output words have at least one EOS token, end decoding.
                    if testing and (tgt_in == self.eos_id).any(dim=-1).all():
                        break

            logits = torch.cat(logits, dim=1)
        else:
            # No prior context, so input is just <bos>. We query all positions.
            tgt_in = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
            tgt_out = self.decode(tgt_in, memory, tgt_query=pos_queries)
            logits = self.head(tgt_out)

        if self.refine_iters:
            # For iterative refinement, we always use a 'cloze' mask.
            # We can derive it from the AR forward mask by unmasking the token context to the right.
            query_mask[torch.triu(torch.ones(num_steps, num_steps, dtype=torch.bool, device=self._device), 2)] = 0
            bos = torch.full((bs, 1), self.bos_id, dtype=torch.long, device=self._device)
            for i in range(self.refine_iters):
                # Prior context is the previous output.
                tgt_in = torch.cat([bos, logits[:, :-1].argmax(-1)], dim=1)
                tgt_padding_mask = ((tgt_in == self.eos_id).cumsum(-1) > 0)  # mask tokens beyond the first EOS token.
                tgt_out = self.decode(tgt_in, memory, tgt_mask, tgt_padding_mask,
                                      tgt_query=pos_queries, tgt_query_mask=query_mask[:, :tgt_in.shape[1]])
                logits = self.head(tgt_out)

        return logits
    def gen_tgt_perms(self, tgt):
        """Generate shared permutations for the whole batch.

        This works because the same attention mask can be used for the shorter sequences
        because of the padding mask.

        Returns a tensor of position indices, one row per permutation, with
        column 0 fixed to <bos> and the last column fixed to <eos>.
        """
        # We don't permute the position of BOS, we permute EOS separately
        max_num_chars = tgt.shape[1] - 2
        # Special handling for 1-character sequences
        if max_num_chars == 1:
            return torch.arange(3, device=self._device).unsqueeze(0)
        perms = [torch.arange(max_num_chars, device=self._device)] if self.perm_forward else []
        # Additional permutations if needed
        max_perms = math.factorial(max_num_chars)
        if self.perm_mirrored:
            max_perms //= 2
        num_gen_perms = min(self.max_gen_perms, max_perms)
        # For 4-char sequences and shorter, we generate all permutations and sample from the pool to avoid collisions
        # Note that this code path might NEVER get executed since the labels in a mini-batch typically exceed 4 chars.
        if max_num_chars < 5:
            # Pool of permutations to sample from. We only need the first half (if complementary option is selected)
            # Special handling for max_num_chars == 4 which correctly divides the pool into the flipped halves
            if max_num_chars == 4 and self.perm_mirrored:
                selector = [0, 3, 4, 6, 9, 10, 12, 16, 17, 18, 19, 21]
            else:
                selector = list(range(max_perms))
            perm_pool = torch.as_tensor(list(permutations(range(max_num_chars), max_num_chars)), device=self._device)[selector]
            # If the forward permutation is always selected, no need to add it to the pool for sampling
            if self.perm_forward:
                perm_pool = perm_pool[1:]
            perms = torch.stack(perms)
            if len(perm_pool):
                i = self.rng.choice(len(perm_pool), size=num_gen_perms - len(perms), replace=False)
                perms = torch.cat([perms, perm_pool[i]])
        else:
            # Long labels: sample random permutations directly (collisions are unlikely here).
            perms.extend([torch.randperm(max_num_chars, device=self._device) for _ in range(num_gen_perms - len(perms))])
            perms = torch.stack(perms)
        if self.perm_mirrored:
            # Add complementary pairs
            comp = perms.flip(-1)
            # Stack in such a way that the pairs are next to each other.
            perms = torch.stack([perms, comp]).transpose(0, 1).reshape(-1, max_num_chars)
        # NOTE:
        # The only meaningful way of permuting the EOS position is by moving it one character position at a time.
        # However, since the number of permutations = T! and number of EOS positions = T + 1, the number of possible EOS
        # positions will always be much less than the number of permutations (unless a low perm_num is set).
        # Thus, it would be simpler to just train EOS using the full and null contexts rather than trying to evenly
        # distribute it across the chosen number of permutations.
        # Add position indices of BOS and EOS
        bos_idx = perms.new_zeros((len(perms), 1))
        eos_idx = perms.new_full((len(perms), 1), max_num_chars + 1)
        perms = torch.cat([bos_idx, perms + 1, eos_idx], dim=1)
        # Special handling for the reverse direction. This does two things:
        # 1. Reverse context for the characters
        # 2. Null context for [EOS] (required for learning to predict [EOS] in NAR mode)
        if len(perms) > 1:
            perms[1, 1:] = max_num_chars + 1 - torch.arange(max_num_chars + 1, device=self._device)
        return perms
def generate_attn_masks(self, perm):
"""Generate attention masks given a sequence permutation (includes pos. for bos and eos tokens)
:param perm: the permutation sequence. i = 0 is always the BOS
:return: lookahead attention masks
"""
sz = perm.shape[0]
mask = torch.zeros((sz, sz), device=self._device)
for i in range(sz):
query_idx = perm[i]
masked_keys = perm[i + 1:]
mask[query_idx, masked_keys] = float('-inf')
content_mask = mask[:-1, :-1].clone()
mask[torch.eye(sz, dtype=torch.bool, device=self._device)] = float('-inf') # mask "self"
query_mask = mask[1:, :-1]
return content_mask, query_mask
    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        """Permutation language modeling: token-weighted average cross-entropy over sampled orderings."""
        images, labels = batch
        tgt = self.tokenizer.encode(labels, self._device)

        # Encode the source sequence (i.e. the image codes)
        memory = self.encode(images)

        # Prepare the target sequences (input and output)
        tgt_perms = self.gen_tgt_perms(tgt)
        tgt_in = tgt[:, :-1]
        tgt_out = tgt[:, 1:]
        # The [EOS] token is not depended upon by any other token in any permutation ordering
        tgt_padding_mask = (tgt_in == self.pad_id) | (tgt_in == self.eos_id)

        loss = 0
        loss_numel = 0
        # n counts non-pad target tokens; it weights each permutation's loss term.
        n = (tgt_out != self.pad_id).sum().item()
        for i, perm in enumerate(tgt_perms):
            tgt_mask, query_mask = self.generate_attn_masks(perm)
            out = self.decode(tgt_in, memory, tgt_mask, tgt_padding_mask, tgt_query_mask=query_mask)
            logits = self.head(out).flatten(end_dim=1)
            loss += n * F.cross_entropy(logits, tgt_out.flatten(), ignore_index=self.pad_id)
            loss_numel += n
            # After the second iteration (i.e. done with canonical and reverse orderings),
            # remove the [EOS] tokens for the succeeding perms
            if i == 1:
                tgt_out = torch.where(tgt_out == self.eos_id, self.pad_id, tgt_out)
                n = (tgt_out != self.pad_id).sum().item()
        loss /= loss_numel

        self.log('loss', loss)
        return loss
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/parseq/modules.py | strhub/models/parseq/modules.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional
import torch
from torch import nn as nn, Tensor
from torch.nn import functional as F
from torch.nn.modules import transformer
from timm.models.vision_transformer import VisionTransformer, PatchEmbed
class DecoderLayer(nn.Module):
    """A Transformer decoder layer supporting two-stream attention (XLNet)

    This implements a pre-LN decoder, as opposed to the post-LN default in PyTorch."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='gelu',
                 layer_norm_eps=1e-5):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        # Separate pre-norms for the query and content streams.
        self.norm_q = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm_c = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        # NOTE(review): transformer._get_activation_fn is a private torch API and
        # may change between releases — confirm against the pinned torch version.
        self.activation = transformer._get_activation_fn(activation)

    def __setstate__(self, state):
        # Backward compatibility for objects pickled without 'activation'.
        if 'activation' not in state:
            state['activation'] = F.gelu
        super().__setstate__(state)

    def forward_stream(self, tgt: Tensor, tgt_norm: Tensor, tgt_kv: Tensor, memory: Tensor, tgt_mask: Optional[Tensor],
                       tgt_key_padding_mask: Optional[Tensor]):
        """Forward pass for a single stream (i.e. content or query)
        tgt_norm is just a LayerNorm'd tgt. Added as a separate parameter for efficiency.
        Both tgt_kv and memory are expected to be LayerNorm'd too.
        memory is LayerNorm'd by ViT.
        """
        # Self-attention block (pre-LN residual).
        tgt2, sa_weights = self.self_attn(tgt_norm, tgt_kv, tgt_kv, attn_mask=tgt_mask,
                                          key_padding_mask=tgt_key_padding_mask)
        tgt = tgt + self.dropout1(tgt2)

        # Cross-attention over the encoder memory.
        tgt2, ca_weights = self.cross_attn(self.norm1(tgt), memory, memory)
        tgt = tgt + self.dropout2(tgt2)

        # Position-wise feedforward block.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(self.norm2(tgt)))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, sa_weights, ca_weights

    def forward(self, query, content, memory, query_mask: Optional[Tensor] = None, content_mask: Optional[Tensor] = None,
                content_key_padding_mask: Optional[Tensor] = None, update_content: bool = True):
        query_norm = self.norm_q(query)
        content_norm = self.norm_c(content)
        # Query stream attends to the content stream (as keys/values) and memory.
        query = self.forward_stream(query, query_norm, content_norm, memory, query_mask, content_key_padding_mask)[0]
        if update_content:
            # Content stream self-attends; callers skip this on the last layer.
            content = self.forward_stream(content, content_norm, content_norm, memory, content_mask,
                                          content_key_padding_mask)[0]
        return query, content
class Decoder(nn.Module):
    """Stack of two-stream decoder layers with a final normalization on the query stream."""
    __constants__ = ['norm']

    def __init__(self, decoder_layer, num_layers, norm):
        super().__init__()
        self.layers = transformer._get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, query, content, memory, query_mask: Optional[Tensor] = None, content_mask: Optional[Tensor] = None,
                content_key_padding_mask: Optional[Tensor] = None):
        total = len(self.layers)
        for depth, layer in enumerate(self.layers, start=1):
            # The content stream output is unused after the final layer, so skip it there.
            query, content = layer(query, content, memory, query_mask, content_mask,
                                   content_key_padding_mask, update_content=depth < total)
        return self.norm(query)
class Encoder(VisionTransformer):
    """ViT encoder with the classifier head disabled; forward returns all patch tokens."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.,
                 qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed):
        # num_classes=0, global_pool='' and class_token=False disable the classifier head.
        super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
                         depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                         drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate,
                         embed_layer=embed_layer, num_classes=0, global_pool='', class_token=False)

    def forward(self, x):
        # Return every token: downstream decoding cross-attends over the full sequence.
        return self.forward_features(x)
class TokenEmbedding(nn.Module):
    """Character embedding scaled by sqrt(embed_dim), as in the original Transformer."""

    def __init__(self, charset_size: int, embed_dim: int):
        super().__init__()
        self.embedding = nn.Embedding(charset_size, embed_dim)
        self.embed_dim = embed_dim

    def forward(self, tokens: torch.Tensor):
        scale = math.sqrt(self.embed_dim)
        return self.embedding(tokens) * scale
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/parseq/__init__.py | strhub/models/parseq/__init__.py | python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false | |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vitstr/model.py | strhub/models/vitstr/model.py | """
Implementation of ViTSTR based on timm VisionTransformer.
TODO:
1) distilled deit backbone
2) base deit backbone
Copyright 2021 Rowel Atienza
"""
import os
import torch
import torch.nn as nn
from pytorch_lightning.utilities import rank_zero_info
from timm.models.vision_transformer import VisionTransformer
class ViTSTR(VisionTransformer):
    """
    ViTSTR is basically a ViT that uses DeiT weights.
    Modified head to support a sequence of characters prediction for STR.
    """

    def forward(self, x, seqlen: int = 25):
        # Keep only the first `seqlen` tokens; each one predicts a character.
        features = self.forward_features(x)
        features = features[:, :seqlen]

        b, s, e = features.size()  # batch, seqlen, embsize
        flat = features.reshape(b * s, e)
        # Apply the shared head per token, then restore the sequence layout.
        return self.head(flat).view(b, s, self.num_classes)
def load_pretrained_weight(model, pretrained):
    """Load ImageNet-21k ViT weights into `model`, adapting mismatched shapes.

    The checkpoint's position embedding is truncated to the model's token count,
    the patch-embedding conv kernel is average-pooled down to the model's kernel
    size, and the classifier head is adaptively pooled to the model's output
    dimension. Loading is non-strict; leftover missing/unexpected keys are printed.

    Args:
        model: target VisionTransformer-like module.
        pretrained: filesystem path to the checkpoint.

    Raises:
        FileNotFoundError: if `pretrained` does not exist (previously the code
            logged and then crashed inside torch.load with a less clear error).
    """
    if not os.path.exists(pretrained):
        rank_zero_info("[ViT] pretrained weight ({}) does not exist".format(pretrained))
        # Fail fast with an explicit error instead of falling through to torch.load.
        raise FileNotFoundError(pretrained)
    rank_zero_info("[ViT] Load pretrained weights from ImageNet-21K pretrained model ({})".format(pretrained))
    state_dict = torch.load(pretrained, map_location='cpu')
    if 'model' in state_dict:
        state_dict = state_dict['model']
    if 'state_dict' in state_dict:
        state_dict = state_dict['state_dict']

    # Convert every tensor to fp32 (some checkpoints ship half-precision weights).
    new_state_dict = {key: value.float() for key, value in state_dict.items()}

    # Position embedding: the STR model uses fewer tokens, so truncate the table.
    old_pe = new_state_dict['pos_embed']
    new_pe_len = model.pos_embed.shape[1]
    new_state_dict['pos_embed'] = old_pe[:, :new_pe_len, ...]

    # Patch-embedding conv: average-pool the kernel down to the model's kernel
    # size (e.g. 16x16 -> 4x8 for STR-shaped patches).
    old_conv1_w = new_state_dict['patch_embed.proj.weight']
    new_conv1_w_shape = model.patch_embed.proj.weight.shape
    if old_conv1_w.shape != new_conv1_w_shape:
        rank_zero_info("[ViT] averaging conv1 weight (patch_embed.proj) of ViT")
        # average across [H, W]
        kernel_size = (old_conv1_w.shape[-2] // new_conv1_w_shape[-2], old_conv1_w.shape[-1] // new_conv1_w_shape[-1])
        new_state_dict['patch_embed.proj.weight'] = nn.AvgPool2d(kernel_size, stride=kernel_size)(old_conv1_w)

    # Classifier head: pool the 21k-class weights down to the charset size.
    old_proj_w = new_state_dict['head.weight']
    old_proj_bias = new_state_dict['head.bias']
    if not isinstance(model.head, nn.Identity):
        new_output_dim = model.head.weight.shape[0]
        # Fix: compare against the checkpoint's *output* dim (rows of head.weight);
        # the original compared against shape[1], the input dim.
        if new_output_dim != old_proj_w.shape[0]:
            rank_zero_info("[ViT] downsample the output head of ViT")
            new_state_dict['head.weight'] = nn.AdaptiveAvgPool1d(new_output_dim)(old_proj_w.transpose(0, 1)).transpose(0, 1)
        if hasattr(model.head, 'bias'):
            new_state_dict['head.bias'] = nn.AdaptiveAvgPool1d(new_output_dim)(old_proj_bias.unsqueeze(0)).squeeze()
    else:
        del new_state_dict['head.weight']
        del new_state_dict['head.bias']

    # Drop the class token when the target model does not use one.
    if model.cls_token is None:
        del new_state_dict['cls_token']

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    print("missing_keys: ", missing_keys)
    print("unexpected_keys: ", unexpected_keys)
# model1 = VisionTransformer(img_size=224, patch_size=16, depth=12, mlp_ratio=4, qkv_bias=True,
# embed_dim=768, num_heads=12, num_classes=21843)
# model1.load_pretrained("B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz")
# sd1 = model1.state_dict()
# torch.save(sd1, "B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.pth")
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vitstr/system.py | strhub/models/vitstr/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Any, Optional
import torch
from pytorch_lightning.utilities.types import STEP_OUTPUT
from pytorch_lightning.utilities import rank_zero_info
from torch import Tensor
from strhub.models.base import CrossEntropySystem
from strhub.models.utils import init_weights
from .model import ViTSTR as Model, load_pretrained_weight
class ViTSTR(CrossEntropySystem):
    """PL system wrapping ViTSTR (a ViT that predicts one character per token position)."""

    def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
                 img_size: Sequence[int], patch_size: Sequence[int], embed_dim: int, num_heads: int,
                 **kwargs: Any) -> None:
        super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
        self.save_hyperparameters()
        self.max_label_length = max_label_length
        # We don't predict <bos> nor <pad>
        self.model = Model(img_size=img_size, patch_size=patch_size, depth=12, mlp_ratio=4, qkv_bias=True,
                           embed_dim=embed_dim, num_heads=num_heads, num_classes=len(self.tokenizer) - 2)
        # Non-zero weight init for the head
        self.model.head.apply(init_weights)
        # Load pretrained weights only when a checkpoint path is supplied.
        # Fix: the original unconditionally indexed kwargs and raised KeyError when
        # the option was absent; the PARSeq system guards the same option.
        if "imagenet21k_pretrained" in kwargs:
            load_pretrained_weight(self.model, kwargs["imagenet21k_pretrained"])

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names (prefixed with 'model.') excluded from weight decay."""
        return {'model.' + n for n in self.model.no_weight_decay()}

    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        """Return per-position character logits, truncated to match the other systems."""
        max_length = self.max_label_length if max_length is None else min(max_length, self.max_label_length)
        logits = self.model.forward(images, max_length + 2)  # +2 tokens for [GO] and [s]
        # Truncate to conform to other models. [GO] in ViTSTR is actually used as the padding (therefore, ignored).
        # First position corresponds to the class token, which is unused and ignored in the original work.
        logits = logits[:, 1:]
        return logits

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        images, labels = batch
        loss = self.forward_logits_loss(images, labels)[1]
        self.log('loss', loss)
        return loss
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/vitstr/__init__.py | strhub/models/vitstr/__init__.py | r"""
Atienza, Rowel. "Vision Transformer for Fast and Efficient Scene Text Recognition."
In International Conference on Document Analysis and Recognition (ICDAR). 2021.
https://arxiv.org/abs/2105.08582
All source files, except `system.py`, are based on the implementation listed below,
and hence are released under the license of the original.
Source: https://github.com/roatienza/deep-text-recognition-benchmark
License: Apache License 2.0 (see LICENSE file in project root)
"""
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/crnn/model.py | strhub/models/crnn/model.py | import torch.nn as nn
from strhub.models.modules import BidirectionalLSTM
class CRNN(nn.Module):
    """CRNN: a 7-conv VGG-style CNN followed by a 2-layer bidirectional LSTM.

    Maps an input image [b, nc, img_h, w] to per-timestep class logits
    [b, w', nclass] suitable for CTC decoding.
    """

    def __init__(self, img_h, nc, nclass, nh, leaky_relu=False):
        """
        Args:
            img_h: input image height; must be a multiple of 16 (four height halvings + final conv).
            nc: number of input channels.
            nclass: number of output classes (charset size incl. blank).
            nh: hidden size of both LSTM layers.
            leaky_relu: use LeakyReLU(0.2) instead of ReLU after each conv.
        """
        super().__init__()
        assert img_h % 16 == 0, 'img_h has to be a multiple of 16'

        # Per-conv hyperparameters: kernel, padding, stride, output channels.
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]

        cnn = nn.Sequential()

        def convRelu(i, batchNormalization=False):
            # Append conv block i (+ optional BN) and its activation to `cnn`.
            nIn = nc if i == 0 else nm[i - 1]
            nOut = nm[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i], bias=not batchNormalization))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
            if leaky_relu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))

        # Shape comments assume a 1x32x128 input (channels x height x width).
        convRelu(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))  # 64x16x64
        convRelu(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))  # 128x8x32
        convRelu(2, True)
        convRelu(3)
        # Asymmetric pooling: halve the height but keep (nearly) full width,
        # preserving horizontal resolution for the character sequence.
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 256x4x16
        convRelu(4, True)
        convRelu(5)
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 512x2x16
        convRelu(6, True)  # 512x1x16

        self.cnn = cnn
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))

    def forward(self, input):
        """Return per-timestep logits [b, w, nclass]."""
        # conv features
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        # Height must have collapsed to 1 so the width axis is the time axis.
        assert h == 1, 'the height of conv must be 1'
        conv = conv.squeeze(2)  # [b, c, w]
        conv = conv.transpose(1, 2)  # [b, w, c]

        # rnn features
        output = self.rnn(conv)

        return output
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/crnn/system.py | strhub/models/crnn/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Optional
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch import Tensor
from strhub.models.base import CTCSystem
from strhub.models.utils import init_weights
from .model import CRNN as Model
class CRNN(CTCSystem):
    """PL system wrapping the CRNN (CNN + BiLSTM) recognizer trained with CTC."""

    def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
                 img_size: Sequence[int], hidden_size: int, leaky_relu: bool, **kwargs) -> None:
        super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
        self.save_hyperparameters()
        # img_size[0] is the image height; 3 input channels (RGB).
        self.model = Model(img_size[0], 3, len(self.tokenizer), hidden_size, leaky_relu)
        self.model.apply(init_weights)

    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        # max_length is accepted for interface parity but unused:
        # CTC output length is fixed by the conv feature width.
        return self.model.forward(images)

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        images, labels = batch
        _, loss = self.forward_logits_loss(images, labels)
        self.log('loss', loss)
        return loss
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/crnn/__init__.py | strhub/models/crnn/__init__.py | r"""
Shi, Baoguang, Xiang Bai, and Cong Yao.
"An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition."
IEEE transactions on pattern analysis and machine intelligence 39, no. 11 (2016): 2298-2304.
https://arxiv.org/abs/1507.05717
All source files, except `system.py`, are based on the implementation listed below,
and hence are released under the license of the original.
Source: https://github.com/meijieru/crnn.pytorch
License: MIT License (see included LICENSE file)
"""
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/prediction.py | strhub/models/trba/prediction.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Attention-based sequence decoder for TRBA.

    At each step it attends over the encoder's contextual features and feeds
    the attended context plus the previous character embedding to an LSTM cell.
    """

    def __init__(self, input_size, hidden_size, num_class, num_char_embeddings=256):
        super().__init__()
        self.attention_cell = AttentionCell(input_size, hidden_size, num_char_embeddings)
        self.hidden_size = hidden_size
        self.num_class = num_class
        self.generator = nn.Linear(hidden_size, num_class)  # hidden state -> class logits
        self.char_embeddings = nn.Embedding(num_class, num_char_embeddings)

    def forward(self, batch_H, text, max_label_length=25):
        """
        input:
            batch_H : contextual_feature H = hidden state of encoder. [batch_size x num_steps x num_class]
            text : the text-index of each image. [batch_size x (max_length+1)]. +1 for [SOS] token. text[:, 0] = [SOS].
        output: probability distribution at each step [batch_size x num_steps x num_class]
        """
        batch_size = batch_H.size(0)
        num_steps = max_label_length + 1  # +1 for [EOS] at end of sentence.

        output_hiddens = batch_H.new_zeros((batch_size, num_steps, self.hidden_size), dtype=torch.float)
        # (hidden, cell) state pair for the LSTM cell, zero-initialized.
        hidden = (batch_H.new_zeros((batch_size, self.hidden_size), dtype=torch.float),
                  batch_H.new_zeros((batch_size, self.hidden_size), dtype=torch.float))

        if self.training:
            # Teacher forcing: feed ground-truth characters from `text` at every step.
            for i in range(num_steps):
                char_embeddings = self.char_embeddings(text[:, i])
                # hidden : decoder's hidden s_{t-1}, batch_H : encoder's hidden H, char_embeddings : f(y_{t-1})
                hidden, alpha = self.attention_cell(hidden, batch_H, char_embeddings)
                output_hiddens[:, i, :] = hidden[0]  # LSTM hidden index (0: hidden, 1: Cell)
            probs = self.generator(output_hiddens)
        else:
            # Greedy decoding: feed the argmax of the previous step back in.
            targets = text[0].expand(batch_size)  # should be fill with [SOS] token
            probs = batch_H.new_zeros((batch_size, num_steps, self.num_class), dtype=torch.float)

            for i in range(num_steps):
                char_embeddings = self.char_embeddings(targets)
                hidden, alpha = self.attention_cell(hidden, batch_H, char_embeddings)
                probs_step = self.generator(hidden[0])
                probs[:, i, :] = probs_step
                _, next_input = probs_step.max(1)
                targets = next_input

        return probs  # batch_size x num_steps x num_class
class AttentionCell(nn.Module):
    """One step of additive (Bahdanau-style) attention over encoder states, feeding an LSTM cell."""

    def __init__(self, input_size, hidden_size, num_embeddings):
        super().__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)  # either i2i or h2h should have bias
        self.score = nn.Linear(hidden_size, 1, bias=False)
        self.rnn = nn.LSTMCell(input_size + num_embeddings, hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_embeddings):
        # Project encoder states and the previous decoder hidden state into a
        # shared space, then score each encoder step.
        enc_proj = self.i2h(batch_H)                          # [b, steps, hidden]
        dec_proj = self.h2h(prev_hidden[0]).unsqueeze(1)      # [b, 1, hidden]
        scores = self.score(torch.tanh(enc_proj + dec_proj))  # [b, steps, 1]
        alpha = F.softmax(scores, dim=1)

        # Attention-weighted sum of the encoder states.
        context = torch.bmm(alpha.permute(0, 2, 1), batch_H).squeeze(1)  # [b, channels]
        rnn_input = torch.cat([context, char_embeddings], 1)  # [b, channels + embedding]
        cur_hidden = self.rnn(rnn_input, prev_hidden)
        return cur_hidden, alpha
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/model.py | strhub/models/trba/model.py | import torch.nn as nn
from strhub.models.modules import BidirectionalLSTM
from .feature_extraction import ResNet_FeatureExtractor
from .prediction import Attention
from .transformation import TPS_SpatialTransformerNetwork
class TRBA(nn.Module):
    """TRBA: TPS Transformation + ResNet feature extraction + BiLSTM + Attention.

    With `use_ctc=True` the attention decoder is replaced by a linear CTC head.
    """

    def __init__(self, img_h, img_w, num_class, num_fiducial=20, input_channel=3, output_channel=512, hidden_size=256,
                 use_ctc=False):
        super().__init__()
        """ Transformation """
        # TPS spatial transformer rectifies the input image before feature extraction.
        self.Transformation = TPS_SpatialTransformerNetwork(
            F=num_fiducial, I_size=(img_h, img_w), I_r_size=(img_h, img_w),
            I_channel_num=input_channel)

        """ FeatureExtraction """
        self.FeatureExtraction = ResNet_FeatureExtractor(input_channel, output_channel)
        self.FeatureExtraction_output = output_channel
        self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # Transform final (imgH/16-1) -> 1

        """ Sequence modeling"""
        self.SequenceModeling = nn.Sequential(
            BidirectionalLSTM(self.FeatureExtraction_output, hidden_size, hidden_size),
            BidirectionalLSTM(hidden_size, hidden_size, hidden_size))
        self.SequenceModeling_output = hidden_size

        """ Prediction """
        if use_ctc:
            self.Prediction = nn.Linear(self.SequenceModeling_output, num_class)
        else:
            self.Prediction = Attention(self.SequenceModeling_output, hidden_size, num_class)

    def forward(self, image, max_label_length, text=None):
        """Return per-step class logits [b, num_steps, num_class].

        Args:
            image: input image batch.
            max_label_length: maximum label length for the attention decoder.
            text: decoder input tokens (teacher forcing / <bos> priming);
                unused by the CTC head.
        """
        """ Transformation stage """
        image = self.Transformation(image)

        """ Feature extraction stage """
        visual_feature = self.FeatureExtraction(image)
        # Make width the sequence axis, then collapse the height axis by pooling.
        visual_feature = visual_feature.permute(0, 3, 1, 2)  # [b, c, h, w] -> [b, w, c, h]
        visual_feature = self.AdaptiveAvgPool(visual_feature)  # [b, w, c, h] -> [b, w, c, 1]
        visual_feature = visual_feature.squeeze(3)  # [b, w, c, 1] -> [b, w, c]

        """ Sequence modeling stage """
        contextual_feature = self.SequenceModeling(visual_feature)  # [b, num_steps, hidden_size]

        """ Prediction stage """
        if isinstance(self.Prediction, Attention):
            prediction = self.Prediction(contextual_feature.contiguous(), text, max_label_length)
        else:
            prediction = self.Prediction(contextual_feature.contiguous())  # CTC

        return prediction  # [b, num_steps, num_class]
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/system.py | strhub/models/trba/system.py | # Scene Text Recognition Model Hub
# Copyright 2022 Darwin Bautista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Sequence, Any, Optional
import torch
import torch.nn.functional as F
from pytorch_lightning.utilities.types import STEP_OUTPUT
from timm.models.helpers import named_apply
from torch import Tensor
from strhub.models.base import CrossEntropySystem, CTCSystem
from strhub.models.utils import init_weights
from .model import TRBA as Model
class TRBA(CrossEntropySystem):
    """TRBA recognizer (attention decoder) trained with cross-entropy."""

    def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
                 img_size: Sequence[int], num_fiducial: int, output_channel: int, hidden_size: int,
                 **kwargs: Any) -> None:
        super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
        self.save_hyperparameters()
        self.max_label_length = max_label_length
        height, width = img_size
        self.model = Model(height, width, len(self.tokenizer), num_fiducial,
                           output_channel=output_channel, hidden_size=hidden_size, use_ctc=False)
        # Keep the TPS localization head's hand-crafted init; re-init everything else.
        named_apply(partial(init_weights, exclude=['Transformation.LocalizationNetwork.localization_fc2']), self.model)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Embedding weights are exempt from weight decay.
        return {'model.Prediction.char_embeddings.weight'}

    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        if max_length is None:
            max_length = self.max_label_length
        else:
            # Never decode beyond the length the model was trained for.
            max_length = min(max_length, self.max_label_length)
        bos = images.new_full([1], self.bos_id, dtype=torch.long)
        return self.model.forward(images, max_length, bos)

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        images, labels = batch
        tokens = self.tokenizer.encode(labels, self.device)
        inputs = tokens[:, :-1]   # decoder input: drop <eos>
        targets = tokens[:, 1:]   # target: drop <bos>
        num_steps = tokens.shape[1] - 2  # exclude <bos> and <eos> from the count
        logits = self.model.forward(images, num_steps, inputs)
        loss = F.cross_entropy(logits.flatten(end_dim=1), targets.flatten(), ignore_index=self.pad_id)
        self.log('loss', loss)
        return loss
class TRBC(CTCSystem):
    """TRBA backbone with a CTC prediction head, trained with CTC loss."""

    def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
                 batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
                 img_size: Sequence[int], num_fiducial: int, output_channel: int, hidden_size: int,
                 **kwargs: Any) -> None:
        super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
        self.save_hyperparameters()
        self.max_label_length = max_label_length
        height, width = img_size
        self.model = Model(height, width, len(self.tokenizer), num_fiducial,
                           output_channel=output_channel, hidden_size=hidden_size, use_ctc=True)
        # Keep the TPS localization head's hand-crafted init; re-init everything else.
        named_apply(partial(init_weights, exclude=['Transformation.LocalizationNetwork.localization_fc2']), self.model)

    def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
        # The CTC head emits one distribution per time step; max_label_length is unused.
        return self.model.forward(images, None)

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        images, labels = batch
        # forward_logits_loss returns (logits, loss, ...); only the loss is needed here.
        loss = self.forward_logits_loss(images, labels)[1]
        self.log('loss', loss)
        return loss
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/feature_extraction.py | strhub/models/trba/feature_extraction.py | import torch.nn as nn
from torchvision.models.resnet import BasicBlock
class ResNet_FeatureExtractor(nn.Module):
    """FAN-style ResNet feature extractor (Cheng et al., ICCV 2017).

    Wraps a custom ResNet with [1, 2, 5, 3] BasicBlocks per stage.
    """

    def __init__(self, input_channel, output_channel=512):
        super().__init__()
        self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])

    def forward(self, input):
        # Pure delegation to the wrapped backbone.
        return self.ConvNet(input)
class ResNet(nn.Module):
def __init__(self, input_channel, output_channel, block, layers):
super().__init__()
self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]
self.inplanes = int(output_channel / 8)
self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_2 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[
0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[
1], kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[
2], kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])
self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=1, padding=0, bias=False)
self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0_1(x)
x = self.bn0_1(x)
x = self.relu(x)
x = self.conv0_2(x)
x = self.bn0_2(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer4(x)
x = self.conv4_1(x)
x = self.bn4_1(x)
x = self.relu(x)
x = self.conv4_2(x)
x = self.bn4_2(x)
x = self.relu(x)
return x
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
VamosC/CLIP4STR | https://github.com/VamosC/CLIP4STR/blob/7497c15d0e6d645e3dfa8c646b31e72153ece2a6/strhub/models/trba/__init__.py | strhub/models/trba/__init__.py | r"""
Baek, Jeonghun, Geewook Kim, Junyeop Lee, Sungrae Park, Dongyoon Han, Sangdoo Yun, Seong Joon Oh, and Hwalsuk Lee.
"What is wrong with scene text recognition model comparisons? dataset and model analysis."
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4715-4723. 2019.
https://arxiv.org/abs/1904.01906
All source files, except `system.py`, are based on the implementation listed below,
and hence are released under the license of the original.
Source: https://github.com/clovaai/deep-text-recognition-benchmark
License: Apache License 2.0 (see LICENSE file in project root)
"""
| python | Apache-2.0 | 7497c15d0e6d645e3dfa8c646b31e72153ece2a6 | 2026-01-05T07:12:52.632396Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.