id
stringlengths
19
21
content
stringlengths
722
86.7k
evocodebench_data_201
# @Author : Shichao Song # @Email : song.shichao@outlook.com import json import os import random from uhgeval.dataset.base import BaseDataset class XinhuaHallucinations(BaseDataset): def __init__(self, path: str, shuffle: bool = False, seed: int = 22): self.data = [] if os.path.isfile(path): with open(path, encoding='utf-8') as f: self.data = json.load(f) if shuffle: random.seed(seed) random.shuffle(self.data) def __len__(self) -> int: return len(self.data) def __getitem__(self, key: int | slice) -> dict | list[dict]: return self.data[key] def load(self) -> list[dict]: return self.data[:] def statistics(self) -> dict: stat = {'doc': 0, 'gen': 0, 'kno': 0, 'num': 0} for type_ in stat.keys(): stat[type_] = sum([obj['type']==type_ for obj in self.data]) return stat
evocodebench_data_202
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.utils import Registry from mmdet.models.builder import BACKBONES as MMDET_BACKBONES from mmdet.models.builder import DETECTORS as MMDET_DETECTORS from mmdet.models.builder import HEADS as MMDET_HEADS from mmdet.models.builder import LOSSES as MMDET_LOSSES from mmdet.models.builder import NECKS as MMDET_NECKS from mmdet.models.builder import ROI_EXTRACTORS as MMDET_ROI_EXTRACTORS from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS from mmseg.models.builder import LOSSES as MMSEG_LOSSES MODELS = Registry('models', parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS ROI_EXTRACTORS = MODELS SHARED_HEADS = MODELS HEADS = MODELS LOSSES = MODELS DETECTORS = MODELS VOXEL_ENCODERS = MODELS MIDDLE_ENCODERS = MODELS FUSION_LAYERS = MODELS SEGMENTORS = MODELS def build_backbone(cfg): """Build backbone.""" if cfg['type'] in BACKBONES._module_dict.keys(): return BACKBONES.build(cfg) else: return MMDET_BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" if cfg['type'] in NECKS._module_dict.keys(): return NECKS.build(cfg) else: return MMDET_NECKS.build(cfg) def build_roi_extractor(cfg): """Build RoI feature extractor.""" if cfg['type'] in ROI_EXTRACTORS._module_dict.keys(): return ROI_EXTRACTORS.build(cfg) else: return MMDET_ROI_EXTRACTORS.build(cfg) def build_shared_head(cfg): """Build shared head of detector.""" if cfg['type'] in SHARED_HEADS._module_dict.keys(): return SHARED_HEADS.build(cfg) else: return MMDET_SHARED_HEADS.build(cfg) def build_head(cfg): """Build head.""" if cfg['type'] in HEADS._module_dict.keys(): return HEADS.build(cfg) else: return MMDET_HEADS.build(cfg) def build_loss(cfg): """Build loss function.""" if cfg['type'] in LOSSES._module_dict.keys(): return LOSSES.build(cfg) elif cfg['type'] in MMDET_LOSSES._module_dict.keys(): return MMDET_LOSSES.build(cfg) else: return MMSEG_LOSSES.build(cfg) def build_detector(cfg, 
train_cfg=None, test_cfg=None): """Build detector.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' if cfg['type'] in DETECTORS._module_dict.keys(): return DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) else: return MMDET_DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_segmentor(cfg, train_cfg=None, test_cfg=None): """Build segmentor.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' return SEGMENTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_model(cfg, train_cfg=None, test_cfg=None): """A function warpper for building 3D detector or segmentor according to cfg. Should be deprecated in the future. """ if cfg.type in ['EncoderDecoder3D']: return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg) else: return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg) def build_voxel_encoder(cfg): """Build voxel encoder.""" return VOXEL_ENCODERS.build(cfg) def build_middle_encoder(cfg): """Build middle level encoder.""" return MIDDLE_ENCODERS.build(cfg) def build_fusion_layer(cfg): """Build fusion layer.""" return FUSION_LAYERS.build(cfg)
evocodebench_data_203
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.utils import Registry from mmdet.models.builder import BACKBONES as MMDET_BACKBONES from mmdet.models.builder import DETECTORS as MMDET_DETECTORS from mmdet.models.builder import HEADS as MMDET_HEADS from mmdet.models.builder import LOSSES as MMDET_LOSSES from mmdet.models.builder import NECKS as MMDET_NECKS from mmdet.models.builder import ROI_EXTRACTORS as MMDET_ROI_EXTRACTORS from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS from mmseg.models.builder import LOSSES as MMSEG_LOSSES MODELS = Registry('models', parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS ROI_EXTRACTORS = MODELS SHARED_HEADS = MODELS HEADS = MODELS LOSSES = MODELS DETECTORS = MODELS VOXEL_ENCODERS = MODELS MIDDLE_ENCODERS = MODELS FUSION_LAYERS = MODELS SEGMENTORS = MODELS def build_backbone(cfg): """Build backbone.""" if cfg['type'] in BACKBONES._module_dict.keys(): return BACKBONES.build(cfg) else: return MMDET_BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" if cfg['type'] in NECKS._module_dict.keys(): return NECKS.build(cfg) else: return MMDET_NECKS.build(cfg) def build_roi_extractor(cfg): """Build RoI feature extractor.""" if cfg['type'] in ROI_EXTRACTORS._module_dict.keys(): return ROI_EXTRACTORS.build(cfg) else: return MMDET_ROI_EXTRACTORS.build(cfg) def build_shared_head(cfg): """Build shared head of detector.""" if cfg['type'] in SHARED_HEADS._module_dict.keys(): return SHARED_HEADS.build(cfg) else: return MMDET_SHARED_HEADS.build(cfg) def build_head(cfg): """Build head.""" if cfg['type'] in HEADS._module_dict.keys(): return HEADS.build(cfg) else: return MMDET_HEADS.build(cfg) def build_loss(cfg): """Build loss function.""" if cfg['type'] in LOSSES._module_dict.keys(): return LOSSES.build(cfg) elif cfg['type'] in MMDET_LOSSES._module_dict.keys(): return MMDET_LOSSES.build(cfg) else: return MMSEG_LOSSES.build(cfg) def build_detector(cfg, 
train_cfg=None, test_cfg=None): """Build detector.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' if cfg['type'] in DETECTORS._module_dict.keys(): return DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) else: return MMDET_DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_segmentor(cfg, train_cfg=None, test_cfg=None): """Build segmentor.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' return SEGMENTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_model(cfg, train_cfg=None, test_cfg=None): """A function warpper for building 3D detector or segmentor according to cfg. Should be deprecated in the future. """ if cfg.type in ['EncoderDecoder3D']: return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg) else: return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg) def build_voxel_encoder(cfg): """Build voxel encoder.""" return VOXEL_ENCODERS.build(cfg) def build_middle_encoder(cfg): """Build middle level encoder.""" return MIDDLE_ENCODERS.build(cfg) def build_fusion_layer(cfg): """Build fusion layer.""" return FUSION_LAYERS.build(cfg)
evocodebench_data_204
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.utils import Registry from mmdet.models.builder import BACKBONES as MMDET_BACKBONES from mmdet.models.builder import DETECTORS as MMDET_DETECTORS from mmdet.models.builder import HEADS as MMDET_HEADS from mmdet.models.builder import LOSSES as MMDET_LOSSES from mmdet.models.builder import NECKS as MMDET_NECKS from mmdet.models.builder import ROI_EXTRACTORS as MMDET_ROI_EXTRACTORS from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS from mmseg.models.builder import LOSSES as MMSEG_LOSSES MODELS = Registry('models', parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS ROI_EXTRACTORS = MODELS SHARED_HEADS = MODELS HEADS = MODELS LOSSES = MODELS DETECTORS = MODELS VOXEL_ENCODERS = MODELS MIDDLE_ENCODERS = MODELS FUSION_LAYERS = MODELS SEGMENTORS = MODELS def build_backbone(cfg): """Build backbone.""" if cfg['type'] in BACKBONES._module_dict.keys(): return BACKBONES.build(cfg) else: return MMDET_BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" if cfg['type'] in NECKS._module_dict.keys(): return NECKS.build(cfg) else: return MMDET_NECKS.build(cfg) def build_roi_extractor(cfg): """Build RoI feature extractor.""" if cfg['type'] in ROI_EXTRACTORS._module_dict.keys(): return ROI_EXTRACTORS.build(cfg) else: return MMDET_ROI_EXTRACTORS.build(cfg) def build_shared_head(cfg): """Build shared head of detector.""" if cfg['type'] in SHARED_HEADS._module_dict.keys(): return SHARED_HEADS.build(cfg) else: return MMDET_SHARED_HEADS.build(cfg) def build_head(cfg): """Build head.""" if cfg['type'] in HEADS._module_dict.keys(): return HEADS.build(cfg) else: return MMDET_HEADS.build(cfg) def build_loss(cfg): """Build loss function.""" if cfg['type'] in LOSSES._module_dict.keys(): return LOSSES.build(cfg) elif cfg['type'] in MMDET_LOSSES._module_dict.keys(): return MMDET_LOSSES.build(cfg) else: return MMSEG_LOSSES.build(cfg) def build_detector(cfg, 
train_cfg=None, test_cfg=None): """Build detector.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' if cfg['type'] in DETECTORS._module_dict.keys(): return DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) else: return MMDET_DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_segmentor(cfg, train_cfg=None, test_cfg=None): """Build segmentor.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' return SEGMENTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_model(cfg, train_cfg=None, test_cfg=None): """A function warpper for building 3D detector or segmentor according to cfg. Should be deprecated in the future. """ if cfg.type in ['EncoderDecoder3D']: return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg) else: return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg) def build_voxel_encoder(cfg): """Build voxel encoder.""" return VOXEL_ENCODERS.build(cfg) def build_middle_encoder(cfg): """Build middle level encoder.""" return MIDDLE_ENCODERS.build(cfg) def build_fusion_layer(cfg): """Build fusion layer.""" return FUSION_LAYERS.build(cfg)
evocodebench_data_205
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.utils import Registry from mmdet.models.builder import BACKBONES as MMDET_BACKBONES from mmdet.models.builder import DETECTORS as MMDET_DETECTORS from mmdet.models.builder import HEADS as MMDET_HEADS from mmdet.models.builder import LOSSES as MMDET_LOSSES from mmdet.models.builder import NECKS as MMDET_NECKS from mmdet.models.builder import ROI_EXTRACTORS as MMDET_ROI_EXTRACTORS from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS from mmseg.models.builder import LOSSES as MMSEG_LOSSES MODELS = Registry('models', parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS ROI_EXTRACTORS = MODELS SHARED_HEADS = MODELS HEADS = MODELS LOSSES = MODELS DETECTORS = MODELS VOXEL_ENCODERS = MODELS MIDDLE_ENCODERS = MODELS FUSION_LAYERS = MODELS SEGMENTORS = MODELS def build_backbone(cfg): """Build backbone.""" if cfg['type'] in BACKBONES._module_dict.keys(): return BACKBONES.build(cfg) else: return MMDET_BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" if cfg['type'] in NECKS._module_dict.keys(): return NECKS.build(cfg) else: return MMDET_NECKS.build(cfg) def build_roi_extractor(cfg): """Build RoI feature extractor.""" if cfg['type'] in ROI_EXTRACTORS._module_dict.keys(): return ROI_EXTRACTORS.build(cfg) else: return MMDET_ROI_EXTRACTORS.build(cfg) def build_shared_head(cfg): """Build shared head of detector.""" if cfg['type'] in SHARED_HEADS._module_dict.keys(): return SHARED_HEADS.build(cfg) else: return MMDET_SHARED_HEADS.build(cfg) def build_head(cfg): """Build head.""" if cfg['type'] in HEADS._module_dict.keys(): return HEADS.build(cfg) else: return MMDET_HEADS.build(cfg) def build_loss(cfg): """Build loss function.""" if cfg['type'] in LOSSES._module_dict.keys(): return LOSSES.build(cfg) elif cfg['type'] in MMDET_LOSSES._module_dict.keys(): return MMDET_LOSSES.build(cfg) else: return MMSEG_LOSSES.build(cfg) def build_detector(cfg, 
train_cfg=None, test_cfg=None): """Build detector.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' if cfg['type'] in DETECTORS._module_dict.keys(): return DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) else: return MMDET_DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_segmentor(cfg, train_cfg=None, test_cfg=None): """Build segmentor.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' return SEGMENTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_model(cfg, train_cfg=None, test_cfg=None): """A function warpper for building 3D detector or segmentor according to cfg. Should be deprecated in the future. """ if cfg.type in ['EncoderDecoder3D']: return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg) else: return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg) def build_voxel_encoder(cfg): """Build voxel encoder.""" return VOXEL_ENCODERS.build(cfg) def build_middle_encoder(cfg): """Build middle level encoder.""" return MIDDLE_ENCODERS.build(cfg) def build_fusion_layer(cfg): """Build fusion layer.""" return FUSION_LAYERS.build(cfg)
evocodebench_data_206
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.cnn import MODELS as MMCV_MODELS from mmcv.utils import Registry from mmdet.models.builder import BACKBONES as MMDET_BACKBONES from mmdet.models.builder import DETECTORS as MMDET_DETECTORS from mmdet.models.builder import HEADS as MMDET_HEADS from mmdet.models.builder import LOSSES as MMDET_LOSSES from mmdet.models.builder import NECKS as MMDET_NECKS from mmdet.models.builder import ROI_EXTRACTORS as MMDET_ROI_EXTRACTORS from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS from mmseg.models.builder import LOSSES as MMSEG_LOSSES MODELS = Registry('models', parent=MMCV_MODELS) BACKBONES = MODELS NECKS = MODELS ROI_EXTRACTORS = MODELS SHARED_HEADS = MODELS HEADS = MODELS LOSSES = MODELS DETECTORS = MODELS VOXEL_ENCODERS = MODELS MIDDLE_ENCODERS = MODELS FUSION_LAYERS = MODELS SEGMENTORS = MODELS def build_backbone(cfg): """Build backbone.""" if cfg['type'] in BACKBONES._module_dict.keys(): return BACKBONES.build(cfg) else: return MMDET_BACKBONES.build(cfg) def build_neck(cfg): """Build neck.""" if cfg['type'] in NECKS._module_dict.keys(): return NECKS.build(cfg) else: return MMDET_NECKS.build(cfg) def build_roi_extractor(cfg): """Build RoI feature extractor.""" if cfg['type'] in ROI_EXTRACTORS._module_dict.keys(): return ROI_EXTRACTORS.build(cfg) else: return MMDET_ROI_EXTRACTORS.build(cfg) def build_shared_head(cfg): """Build shared head of detector.""" if cfg['type'] in SHARED_HEADS._module_dict.keys(): return SHARED_HEADS.build(cfg) else: return MMDET_SHARED_HEADS.build(cfg) def build_head(cfg): """Build head.""" if cfg['type'] in HEADS._module_dict.keys(): return HEADS.build(cfg) else: return MMDET_HEADS.build(cfg) def build_loss(cfg): """Build loss function.""" if cfg['type'] in LOSSES._module_dict.keys(): return LOSSES.build(cfg) elif cfg['type'] in MMDET_LOSSES._module_dict.keys(): return MMDET_LOSSES.build(cfg) else: return MMSEG_LOSSES.build(cfg) def build_detector(cfg, 
train_cfg=None, test_cfg=None): """Build detector.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' if cfg['type'] in DETECTORS._module_dict.keys(): return DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) else: return MMDET_DETECTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_segmentor(cfg, train_cfg=None, test_cfg=None): """Build segmentor.""" if train_cfg is not None or test_cfg is not None: warnings.warn( 'train_cfg and test_cfg is deprecated, ' 'please specify them in model', UserWarning) assert cfg.get('train_cfg') is None or train_cfg is None, \ 'train_cfg specified in both outer field and model field ' assert cfg.get('test_cfg') is None or test_cfg is None, \ 'test_cfg specified in both outer field and model field ' return SEGMENTORS.build( cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) def build_model(cfg, train_cfg=None, test_cfg=None): """A function warpper for building 3D detector or segmentor according to cfg. Should be deprecated in the future. """ if cfg.type in ['EncoderDecoder3D']: return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg) else: return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg) def build_voxel_encoder(cfg): """Build voxel encoder.""" return VOXEL_ENCODERS.build(cfg) def build_middle_encoder(cfg): """Build middle level encoder.""" return MIDDLE_ENCODERS.build(cfg) def build_fusion_layer(cfg): """Build fusion layer.""" return FUSION_LAYERS.build(cfg)
evocodebench_data_207
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmcv.utils import print_log from terminaltables import AsciiTable def average_precision(recalls, precisions, mode='area'): """Calculate average precision (for single or multiple scales). Args: recalls (np.ndarray): Recalls with shape of (num_scales, num_dets) or (num_dets, ). precisions (np.ndarray): Precisions with shape of (num_scales, num_dets) or (num_dets, ). mode (str): 'area' or '11points', 'area' means calculating the area under precision-recall curve, '11points' means calculating the average precision of recalls at [0, 0.1, ..., 1] Returns: float or np.ndarray: Calculated average precision. """ if recalls.ndim == 1: recalls = recalls[np.newaxis, :] precisions = precisions[np.newaxis, :] assert recalls.shape == precisions.shape assert recalls.ndim == 2 num_scales = recalls.shape[0] ap = np.zeros(num_scales, dtype=np.float32) if mode == 'area': zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) ones = np.ones((num_scales, 1), dtype=recalls.dtype) mrec = np.hstack((zeros, recalls, ones)) mpre = np.hstack((zeros, precisions, zeros)) for i in range(mpre.shape[1] - 1, 0, -1): mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) for i in range(num_scales): ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] ap[i] = np.sum( (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) elif mode == '11points': for i in range(num_scales): for thr in np.arange(0, 1 + 1e-3, 0.1): precs = precisions[i, recalls[i, :] >= thr] prec = precs.max() if precs.size > 0 else 0 ap[i] += prec ap /= 11 else: raise ValueError( 'Unrecognized mode, only "area" and "11points" are supported') return ap def eval_det_cls(pred, gt, iou_thr=None): """Generic functions to compute precision/recall for object detection for a single class. Args: pred (dict): Predictions mapping from image id to bounding boxes and scores. gt (dict): Ground truths mapping from image id to bounding boxes. 
iou_thr (list[float]): A list of iou thresholds. Return: tuple (np.ndarray, np.ndarray, float): Recalls, precisions and average precision. """ # {img_id: {'bbox': box structure, 'det': matched list}} class_recs = {} npos = 0 for img_id in gt.keys(): cur_gt_num = len(gt[img_id]) if cur_gt_num != 0: gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32) for i in range(cur_gt_num): gt_cur[i] = gt[img_id][i].tensor bbox = gt[img_id][0].new_box(gt_cur) else: bbox = gt[img_id] det = [[False] * len(bbox) for i in iou_thr] npos += len(bbox) class_recs[img_id] = {'bbox': bbox, 'det': det} # construct dets image_ids = [] confidence = [] ious = [] for img_id in pred.keys(): cur_num = len(pred[img_id]) if cur_num == 0: continue pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32) box_idx = 0 for box, score in pred[img_id]: image_ids.append(img_id) confidence.append(score) pred_cur[box_idx] = box.tensor box_idx += 1 pred_cur = box.new_box(pred_cur) gt_cur = class_recs[img_id]['bbox'] if len(gt_cur) > 0: # calculate iou in each image iou_cur = pred_cur.overlaps(pred_cur, gt_cur) for i in range(cur_num): ious.append(iou_cur[i]) else: for i in range(cur_num): ious.append(np.zeros(1)) confidence = np.array(confidence) # sort by confidence sorted_ind = np.argsort(-confidence) image_ids = [image_ids[x] for x in sorted_ind] ious = [ious[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp_thr = [np.zeros(nd) for i in iou_thr] fp_thr = [np.zeros(nd) for i in iou_thr] for d in range(nd): R = class_recs[image_ids[d]] iou_max = -np.inf BBGT = R['bbox'] cur_iou = ious[d] if len(BBGT) > 0: # compute overlaps for j in range(len(BBGT)): # iou = get_iou_main(get_iou_func, (bb, BBGT[j,...])) iou = cur_iou[j] if iou > iou_max: iou_max = iou jmax = j for iou_idx, thresh in enumerate(iou_thr): if iou_max > thresh: if not R['det'][iou_idx][jmax]: tp_thr[iou_idx][d] = 1. R['det'][iou_idx][jmax] = 1 else: fp_thr[iou_idx][d] = 1. else: fp_thr[iou_idx][d] = 1. 
ret = [] for iou_idx, thresh in enumerate(iou_thr): # compute precision recall fp = np.cumsum(fp_thr[iou_idx]) tp = np.cumsum(tp_thr[iou_idx]) recall = tp / float(npos) # avoid divide by zero in case the first detection matches a difficult # ground truth precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = average_precision(recall, precision) ret.append((recall, precision, ap)) return ret def eval_map_recall(pred, gt, ovthresh=None): """Evaluate mAP and recall. Generic functions to compute precision/recall for object detection for multiple classes. Args: pred (dict): Information of detection results, which maps class_id and predictions. gt (dict): Information of ground truths, which maps class_id and ground truths. ovthresh (list[float], optional): iou threshold. Default: None. Return: tuple[dict]: dict results of recall, AP, and precision for all classes. """ ret_values = {} for classname in gt.keys(): if classname in pred: ret_values[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh) recall = [{} for i in ovthresh] precision = [{} for i in ovthresh] ap = [{} for i in ovthresh] for label in gt.keys(): for iou_idx, thresh in enumerate(ovthresh): if label in pred: recall[iou_idx][label], precision[iou_idx][label], ap[iou_idx][ label] = ret_values[label][iou_idx] else: recall[iou_idx][label] = np.zeros(1) precision[iou_idx][label] = np.zeros(1) ap[iou_idx][label] = np.zeros(1) return recall, precision, ap def indoor_eval(gt_annos, dt_annos, metric, label2cat, logger=None, box_type_3d=None, box_mode_3d=None): """Indoor Evaluation. Evaluate the result of the detection. Args: gt_annos (list[dict]): Ground truth annotations. dt_annos (list[dict]): Detection annotations. the dict includes the following keys - labels_3d (torch.Tensor): Labels of boxes. - boxes_3d (:obj:`BaseInstance3DBoxes`): 3D bounding boxes in Depth coordinate. - scores_3d (torch.Tensor): Scores of boxes. 
metric (list[float]): IoU thresholds for computing average precisions. label2cat (dict): Map from label to category. logger (logging.Logger | str, optional): The way to print the mAP summary. See `mmdet.utils.print_log()` for details. Default: None. Return: dict[str, float]: Dict of results. """ assert len(dt_annos) == len(gt_annos) pred = {} # map {class_id: pred} gt = {} # map {class_id: gt} for img_id in range(len(dt_annos)): # parse detected annotations det_anno = dt_annos[img_id] for i in range(len(det_anno['labels_3d'])): label = det_anno['labels_3d'].numpy()[i] bbox = det_anno['boxes_3d'].convert_to(box_mode_3d)[i] score = det_anno['scores_3d'].numpy()[i] if label not in pred: pred[int(label)] = {} if img_id not in pred[label]: pred[int(label)][img_id] = [] if label not in gt: gt[int(label)] = {} if img_id not in gt[label]: gt[int(label)][img_id] = [] pred[int(label)][img_id].append((bbox, score)) # parse gt annotations gt_anno = gt_annos[img_id] if gt_anno['gt_num'] != 0: gt_boxes = box_type_3d( gt_anno['gt_boxes_upright_depth'], box_dim=gt_anno['gt_boxes_upright_depth'].shape[-1], origin=(0.5, 0.5, 0.5)).convert_to(box_mode_3d) labels_3d = gt_anno['class'] else: gt_boxes = box_type_3d(np.array([], dtype=np.float32)) labels_3d = np.array([], dtype=np.int64) for i in range(len(labels_3d)): label = labels_3d[i] bbox = gt_boxes[i] if label not in gt: gt[label] = {} if img_id not in gt[label]: gt[label][img_id] = [] gt[label][img_id].append(bbox) rec, prec, ap = eval_map_recall(pred, gt, metric) ret_dict = dict() header = ['classes'] table_columns = [[label2cat[label] for label in ap[0].keys()] + ['Overall']] for i, iou_thresh in enumerate(metric): header.append(f'AP_{iou_thresh:.2f}') header.append(f'AR_{iou_thresh:.2f}') rec_list = [] for label in ap[i].keys(): ret_dict[f'{label2cat[label]}_AP_{iou_thresh:.2f}'] = float( ap[i][label][0]) ret_dict[f'mAP_{iou_thresh:.2f}'] = float( np.mean(list(ap[i].values()))) table_columns.append(list(map(float, 
list(ap[i].values())))) table_columns[-1] += [ret_dict[f'mAP_{iou_thresh:.2f}']] table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]] for label in rec[i].keys(): ret_dict[f'{label2cat[label]}_rec_{iou_thresh:.2f}'] = float( rec[i][label][-1]) rec_list.append(rec[i][label][-1]) ret_dict[f'mAR_{iou_thresh:.2f}'] = float(np.mean(rec_list)) table_columns.append(list(map(float, rec_list))) table_columns[-1] += [ret_dict[f'mAR_{iou_thresh:.2f}']] table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]] table_data = [header] table_rows = list(zip(*table_columns)) table_data += table_rows table = AsciiTable(table_data) table.inner_footing_row_border = True print_log('\n' + table.table, logger=logger) return ret_dict
evocodebench_data_208
# Copyright (c) OpenMMLab. All rights reserved.
from logging import warning

import numpy as np
import torch

from mmdet3d.core.utils import array_converter


@array_converter(apply_to=('val', ))
def limit_period(val, offset=0.5, period=np.pi):
    """Limit the value into a period for periodic function.

    Args:
        val (torch.Tensor | np.ndarray): The value to be converted.
        offset (float, optional): Offset to set the value range.
            Defaults to 0.5.
        period (float, optional): Period of the value. Defaults to np.pi.

    Returns:
        (torch.Tensor | np.ndarray): Value in the range of
            [-offset * period, (1-offset) * period]
    """
    limited_val = val - torch.floor(val / period + offset) * period
    return limited_val


@array_converter(apply_to=('points', 'angles'))
def rotation_3d_in_axis(points,
                        angles,
                        axis=0,
                        return_mat=False,
                        clockwise=False):
    """Rotate points by angles according to axis.

    Args:
        points (np.ndarray | torch.Tensor | list | tuple):
            Points of shape (N, M, 3).
        angles (np.ndarray | torch.Tensor | list | tuple | float):
            Vector of angles in shape (N,)
        axis (int, optional): The axis to be rotated. Defaults to 0.
        return_mat (bool, optional): Whether or not to return the rotation
            matrix (transposed). Defaults to False.
        clockwise (bool, optional): Whether the rotation is clockwise.
            Defaults to False.

    Raises:
        ValueError: when the axis is not in range [-3, -2, -1, 0, 1, 2], it
            will raise value error.

    Returns:
        (torch.Tensor | np.ndarray): Rotated points in shape (N, M, 3).
    """
    # Accept single-box input of shape (M, 2|3) by adding a temporary
    # batch dimension that is squeezed away again before returning.
    batch_free = len(points.shape) == 2
    if batch_free:
        points = points[None]

    # A scalar angle means "rotate every box in the batch by this angle".
    if isinstance(angles, float) or len(angles.shape) == 0:
        angles = torch.full(points.shape[:1], angles)

    assert len(points.shape) == 3 and len(angles.shape) == 1 \
        and points.shape[0] == angles.shape[0], f'Incorrect shape of points ' \
        f'angles: {points.shape}, {angles.shape}'

    assert points.shape[-1] in [2, 3], \
        f'Points size should be 2 or 3 instead of {points.shape[-1]}'

    rot_sin = torch.sin(angles)
    rot_cos = torch.cos(angles)
    ones = torch.ones_like(rot_cos)
    zeros = torch.zeros_like(rot_cos)

    if points.shape[-1] == 3:
        # Build the *transposed* rotation matrix per batch entry; the
        # stacking order puts the batch dimension last (shape 3 x 3 x N).
        if axis == 1 or axis == -2:
            rot_mat_T = torch.stack([
                torch.stack([rot_cos, zeros, -rot_sin]),
                torch.stack([zeros, ones, zeros]),
                torch.stack([rot_sin, zeros, rot_cos])
            ])
        elif axis == 2 or axis == -1:
            rot_mat_T = torch.stack([
                torch.stack([rot_cos, rot_sin, zeros]),
                torch.stack([-rot_sin, rot_cos, zeros]),
                torch.stack([zeros, zeros, ones])
            ])
        elif axis == 0 or axis == -3:
            rot_mat_T = torch.stack([
                torch.stack([ones, zeros, zeros]),
                torch.stack([zeros, rot_cos, rot_sin]),
                torch.stack([zeros, -rot_sin, rot_cos])
            ])
        else:
            raise ValueError(f'axis should in range '
                             f'[-3, -2, -1, 0, 1, 2], got {axis}')
    else:
        # 2D points: a plain in-plane rotation, axis argument is ignored.
        rot_mat_T = torch.stack([
            torch.stack([rot_cos, rot_sin]),
            torch.stack([-rot_sin, rot_cos])
        ])

    if clockwise:
        rot_mat_T = rot_mat_T.transpose(0, 1)

    if points.shape[0] == 0:
        points_new = points
    else:
        # 'aij,jka->aik': apply the a-th (transposed) rotation matrix to the
        # a-th group of points in one batched contraction.
        points_new = torch.einsum('aij,jka->aik', points, rot_mat_T)

    if batch_free:
        points_new = points_new.squeeze(0)

    if return_mat:
        # Move the batch dimension to the front before returning the matrix.
        rot_mat_T = torch.einsum('jka->ajk', rot_mat_T)
        if batch_free:
            rot_mat_T = rot_mat_T.squeeze(0)
        return points_new, rot_mat_T
    else:
        return points_new


@array_converter(apply_to=('boxes_xywhr', ))
def xywhr2xyxyr(boxes_xywhr):
    """Convert a rotated boxes in XYWHR format to XYXYR format.

    Args:
        boxes_xywhr (torch.Tensor | np.ndarray): Rotated boxes in XYWHR
            format.

    Returns:
        (torch.Tensor | np.ndarray): Converted boxes in XYXYR format.
    """
    boxes = torch.zeros_like(boxes_xywhr)
    half_w = boxes_xywhr[..., 2] / 2
    half_h = boxes_xywhr[..., 3] / 2

    boxes[..., 0] = boxes_xywhr[..., 0] - half_w
    boxes[..., 1] = boxes_xywhr[..., 1] - half_h
    boxes[..., 2] = boxes_xywhr[..., 0] + half_w
    boxes[..., 3] = boxes_xywhr[..., 1] + half_h
    # The rotation component is carried over unchanged.
    boxes[..., 4] = boxes_xywhr[..., 4]
    return boxes


def get_box_type(box_type):
    """Get the type and mode of box structure.

    Args:
        box_type (str): The type of box structure. The valid value are
            "LiDAR", "Camera", or "Depth".

    Raises:
        ValueError: A ValueError is raised when `box_type` does not belong
            to the three valid types.

    Returns:
        tuple: Box type and box mode.
    """
    # Imported locally, presumably to avoid a circular import with the box
    # structure modules -- TODO confirm.
    from .box_3d_mode import (Box3DMode, CameraInstance3DBoxes,
                              DepthInstance3DBoxes, LiDARInstance3DBoxes)
    box_type_lower = box_type.lower()
    if box_type_lower == 'lidar':
        box_type_3d = LiDARInstance3DBoxes
        box_mode_3d = Box3DMode.LIDAR
    elif box_type_lower == 'camera':
        box_type_3d = CameraInstance3DBoxes
        box_mode_3d = Box3DMode.CAM
    elif box_type_lower == 'depth':
        box_type_3d = DepthInstance3DBoxes
        box_mode_3d = Box3DMode.DEPTH
    else:
        raise ValueError('Only "box_type" of "camera", "lidar", "depth"'
                         f' are supported, got {box_type}')

    return box_type_3d, box_mode_3d


@array_converter(apply_to=('points_3d', 'proj_mat'))
def points_cam2img(points_3d, proj_mat, with_depth=False):
    """Project points in camera coordinates to image coordinates.

    Args:
        points_3d (torch.Tensor | np.ndarray): Points in shape (N, 3)
        proj_mat (torch.Tensor | np.ndarray):
            Transformation matrix between coordinates.
        with_depth (bool, optional): Whether to keep depth in the output.
            Defaults to False.

    Returns:
        (torch.Tensor | np.ndarray): Points in image coordinates,
            with shape [N, 2] if `with_depth=False`, else [N, 3].
    """
    points_shape = list(points_3d.shape)
    points_shape[-1] = 1

    assert len(proj_mat.shape) == 2, 'The dimension of the projection'\
        f' matrix should be 2 instead of {len(proj_mat.shape)}.'
    d1, d2 = proj_mat.shape[:2]
    assert (d1 == 3 and d2 == 3) or (d1 == 3 and d2 == 4) or (
        d1 == 4 and d2 == 4), 'The shape of the projection matrix'\
        f' ({d1}*{d2}) is not supported.'
    if d1 == 3:
        # Pad a 3x3 / 3x4 matrix into a full 4x4 homogeneous transform.
        proj_mat_expanded = torch.eye(
            4, device=proj_mat.device, dtype=proj_mat.dtype)
        proj_mat_expanded[:d1, :d2] = proj_mat
        proj_mat = proj_mat_expanded

    # previous implementation use new_zeros, new_one yields better results
    points_4 = torch.cat([points_3d, points_3d.new_ones(points_shape)], dim=-1)

    point_2d = points_4 @ proj_mat.T
    # Perspective division by depth to land in pixel coordinates.
    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]

    if with_depth:
        point_2d_res = torch.cat([point_2d_res, point_2d[..., 2:3]], dim=-1)

    return point_2d_res


@array_converter(apply_to=('points', 'cam2img'))
def points_img2cam(points, cam2img):
    """Project points in image coordinates to camera coordinates.

    Args:
        points (torch.Tensor): 2.5D points in 2D images, [N, 3],
            3 corresponds with x, y in the image and depth.
        cam2img (torch.Tensor): Camera intrinsic matrix. The shape can be
            [3, 3], [3, 4] or [4, 4].

    Returns:
        torch.Tensor: points in 3D space. [N, 3],
            3 corresponds with x, y, z in 3D space.
    """
    assert cam2img.shape[0] <= 4
    assert cam2img.shape[1] <= 4
    assert points.shape[1] == 3

    xys = points[:, :2]
    depths = points[:, 2].view(-1, 1)
    # Undo the perspective division: image coords times depth.
    unnormed_xys = torch.cat([xys * depths, depths], dim=1)

    pad_cam2img = torch.eye(4, dtype=xys.dtype, device=xys.device)
    pad_cam2img[:cam2img.shape[0], :cam2img.shape[1]] = cam2img
    inv_pad_cam2img = torch.inverse(pad_cam2img).transpose(0, 1)

    # Do operation in homogeneous coordinates.
    num_points = unnormed_xys.shape[0]
    homo_xys = torch.cat([unnormed_xys, xys.new_ones((num_points, 1))], dim=1)
    points3D = torch.mm(homo_xys, inv_pad_cam2img)[:, :3]

    return points3D


def mono_cam_box2vis(cam_box):
    """This is a post-processing function on the bboxes from Mono-3D task. If
    we want to perform projection visualization, we need to:

        1. rotate the box along x-axis for np.pi / 2 (roll)
        2. change orientation from local yaw to global yaw
        3. convert yaw by (np.pi / 2 - yaw)

    After applying this function, we can project and draw it on 2D images.

    Args:
        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate
            system before conversion. Could be gt bbox loaded from dataset
            or network prediction output.

    Returns:
        :obj:`CameraInstance3DBoxes`: Box after conversion.
    """
    # BUGFIX: `warning` is the `logging.warning` *function* (imported at the
    # top of this module), so the previous `warning.warn(...)` raised
    # AttributeError. Call the function directly instead.
    warning('DeprecationWarning: The hack of yaw and dimension in the '
            'monocular 3D detection on nuScenes has been removed. The '
            'function mono_cam_box2vis will be deprecated.')
    from . import CameraInstance3DBoxes
    assert isinstance(cam_box, CameraInstance3DBoxes), \
        'input bbox should be CameraInstance3DBoxes!'

    loc = cam_box.gravity_center
    dim = cam_box.dims
    yaw = cam_box.yaw
    feats = cam_box.tensor[:, 7:]

    # rotate along x-axis for np.pi / 2
    # see also here: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L557  # noqa
    dim[:, [1, 2]] = dim[:, [2, 1]]

    # change local yaw to global yaw for visualization
    # refer to https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L164-L166  # noqa
    yaw += torch.atan2(loc[:, 0], loc[:, 2])

    # convert yaw by (-yaw - np.pi / 2)
    # this is because mono 3D box class such as `NuScenesBox` has different
    # definition of rotation with our `CameraInstance3DBoxes`
    yaw = -yaw - np.pi / 2
    cam_box = torch.cat([loc, dim, yaw[:, None], feats], dim=1)
    cam_box = CameraInstance3DBoxes(
        cam_box, box_dim=cam_box.shape[-1], origin=(0.5, 0.5, 0.5))

    return cam_box


def get_proj_mat_by_coord_type(img_meta, coord_type):
    """Obtain the projection matrix that matches a coordinate type.

    Args:
        img_meta (dict): Meta info.
        coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.
            Can be case-insensitive.

    Returns:
        torch.Tensor: transformation matrix.
    """
    coord_type = coord_type.upper()
    mapping = {'LIDAR': 'lidar2img', 'DEPTH': 'depth2img', 'CAMERA': 'cam2img'}
    assert coord_type in mapping.keys()
    return img_meta[mapping[coord_type]]


def yaw2local(yaw, loc):
    """Transform global yaw to local yaw (alpha in kitti) in camera
    coordinates, ranges from -pi to pi.

    Args:
        yaw (torch.Tensor): A vector with local yaw of each box.
            shape: (N, )
        loc (torch.Tensor): gravity center of each box.
            shape: (N, 3)

    Returns:
        torch.Tensor: local yaw (alpha in kitti).
    """
    local_yaw = yaw - torch.atan2(loc[:, 0], loc[:, 2])
    # Wrap any result that fell outside (-pi, pi] back into range.
    larger_idx = (local_yaw > np.pi).nonzero(as_tuple=False)
    small_idx = (local_yaw < -np.pi).nonzero(as_tuple=False)
    if len(larger_idx) != 0:
        local_yaw[larger_idx] -= 2 * np.pi
    if len(small_idx) != 0:
        local_yaw[small_idx] += 2 * np.pi

    return local_yaw
evocodebench_data_209
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256

from base64 import b64encode, b64decode

from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal

import sys

if sys.version_info < (3, 9):
  from typing import Iterator, AsyncIterator
else:
  from collections.abc import Iterator, AsyncIterator

from importlib import metadata

try:
  __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
  __version__ = '0.0.0'

from ollama._types import Message, Options, RequestError, ResponseError


class BaseClient:
  """Shared construction logic for the sync and async Ollama clients."""

  def __init__(
    self,
    client,
    host: Optional[str] = None,
    follow_redirects: bool = True,
    timeout: Any = None,
    **kwargs,
  ) -> None:
    """
    Creates a httpx client. Default parameters are the same as those defined in httpx
    except for the following:
    - `follow_redirects`: True
    - `timeout`: None
    `kwargs` are passed to the httpx client.
    """
    headers = kwargs.pop('headers', {})
    headers['Content-Type'] = 'application/json'
    headers['Accept'] = 'application/json'
    headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'

    self._client = client(
      base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
      follow_redirects=follow_redirects,
      timeout=timeout,
      headers=headers,
      **kwargs,
    )


class Client(BaseClient):
  """Synchronous client for the Ollama HTTP API."""

  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.Client, host, **kwargs)

  def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
    """Issue a request and convert HTTP errors into `ResponseError`."""
    response = self._client.request(method, url, **kwargs)

    try:
      response.raise_for_status()
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None

    return response

  def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
    """Yield newline-delimited JSON objects from a streaming response."""
    with self._client.stream(method, url, **kwargs) as r:
      try:
        r.raise_for_status()
      except httpx.HTTPStatusError as e:
        # The body of a streamed response must be read before `.text`.
        e.response.read()
        raise ResponseError(e.response.text, e.response.status_code) from None

      for line in r.iter_lines():
        partial = json.loads(line)
        # The server reports mid-stream failures as an 'error' field.
        if e := partial.get('error'):
          raise ResponseError(e)
        yield partial

  def _request_stream(
    self,
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()

  def generate(
    self,
    model: str = '',
    prompt: str = '',
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: bool = False,
    format: Literal['', 'json'] = '',
    images: Optional[Sequence[AnyStr]] = None,
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    return self._request_stream(
      'POST',
      '/api/generate',
      json={
        'model': model,
        'prompt': prompt,
        'system': system,
        'template': template,
        'context': context or [],
        'stream': stream,
        'raw': raw,
        'images': [_encode_image(image) for image in images or []],
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Message]] = None,
    stream: bool = False,
    format: Literal['', 'json'] = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a chat response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    for message in messages or []:
      if not isinstance(message, dict):
        raise TypeError('messages must be a list of Message or dict-like objects')
      if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
        raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
      if not message.get('content'):
        raise RequestError('messages must contain content')
      if images := message.get('images'):
        message['images'] = [_encode_image(image) for image in images]

    return self._request_stream(
      'POST',
      '/api/chat',
      json={
        'model': model,
        'messages': messages,
        'stream': stream,
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  def embeddings(
    self,
    model: str = '',
    prompt: str = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Sequence[float]:
    """Return the embedding vector for `prompt` computed by `model`."""
    return self._request(
      'POST',
      '/api/embeddings',
      json={
        'model': model,
        'prompt': prompt,
        'options': options or {},
        'keep_alive': keep_alive,
      },
    ).json()

  def pull(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request_stream(
      'POST',
      '/api/pull',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  def push(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request_stream(
      'POST',
      '/api/push',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  def create(
    self,
    model: str,
    path: Optional[Union[str, PathLike]] = None,
    modelfile: Optional[str] = None,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    if (realpath := _as_path(path)) and realpath.exists():
      modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
    elif modelfile:
      modelfile = self._parse_modelfile(modelfile)
    else:
      raise RequestError('must provide either path or modelfile')

    return self._request_stream(
      'POST',
      '/api/create',
      json={
        'name': model,
        'modelfile': modelfile,
        'stream': stream,
      },
      stream=stream,
    )

  def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
    """Rewrite FROM/ADAPTER file references in a Modelfile as blob digests."""
    base = Path.cwd() if base is None else base

    out = io.StringIO()
    for line in io.StringIO(modelfile):
      command, _, args = line.partition(' ')
      if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue

      path = Path(args.strip()).expanduser()
      path = path if path.is_absolute() else base / path
      if path.exists():
        # Replace the local path with an uploaded blob reference.
        args = f'@{self._create_blob(path)}\n'
      print(command, args, end='', file=out)

    return out.getvalue()

  def _create_blob(self, path: Union[str, Path]) -> str:
    """Upload the file at `path` as a blob (if missing) and return its digest."""
    sha256sum = sha256()
    with open(path, 'rb') as r:
      while True:
        chunk = r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    try:
      # HEAD checks whether the server already has this blob.
      self._request('HEAD', f'/api/blobs/{digest}')
    except ResponseError as e:
      if e.status_code != 404:
        raise

      with open(path, 'rb') as r:
        self._request('POST', f'/api/blobs/{digest}', content=r)

    return digest

  def delete(self, model: str) -> Mapping[str, Any]:
    """Delete `model` from the server."""
    response = self._request('DELETE', '/api/delete', json={'name': model})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  def list(self) -> Mapping[str, Any]:
    """List the models available on the server."""
    return self._request('GET', '/api/tags').json()

  def copy(self, source: str, destination: str) -> Mapping[str, Any]:
    """Copy the model named `source` to `destination`."""
    response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  def show(self, model: str) -> Mapping[str, Any]:
    """Return details (modelfile, parameters, template, ...) for `model`."""
    return self._request('POST', '/api/show', json={'name': model}).json()


class AsyncClient(BaseClient):
  """Asynchronous client for the Ollama HTTP API, mirroring `Client`."""

  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.AsyncClient, host, **kwargs)

  async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
    """Issue a request and convert HTTP errors into `ResponseError`."""
    response = await self._client.request(method, url, **kwargs)

    try:
      response.raise_for_status()
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None

    return response

  async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
    async def inner():
      async with self._client.stream(method, url, **kwargs) as r:
        try:
          r.raise_for_status()
        except httpx.HTTPStatusError as e:
          # BUGFIX: a streamed *async* response must be read with the async
          # `aread()`; the previous sync `e.response.read()` raised
          # RuntimeError instead of surfacing the server's error text.
          await e.response.aread()
          raise ResponseError(e.response.text, e.response.status_code) from None

        async for line in r.aiter_lines():
          partial = json.loads(line)
          # The server reports mid-stream failures as an 'error' field.
          if e := partial.get('error'):
            raise ResponseError(e)
          yield partial

    return inner()

  async def _request_stream(
    self,
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    if stream:
      return await self._stream(*args, **kwargs)

    response = await self._request(*args, **kwargs)
    return response.json()

  async def generate(
    self,
    model: str = '',
    prompt: str = '',
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: bool = False,
    format: Literal['', 'json'] = '',
    images: Optional[Sequence[AnyStr]] = None,
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    return await self._request_stream(
      'POST',
      '/api/generate',
      json={
        'model': model,
        'prompt': prompt,
        'system': system,
        'template': template,
        'context': context or [],
        'stream': stream,
        'raw': raw,
        'images': [_encode_image(image) for image in images or []],
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  async def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Message]] = None,
    stream: bool = False,
    format: Literal['', 'json'] = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a chat response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    for message in messages or []:
      if not isinstance(message, dict):
        # BUGFIX: message aligned with the synchronous Client.chat; the old
        # text ('messages must be a list of strings') was wrong/inconsistent.
        raise TypeError('messages must be a list of Message or dict-like objects')
      if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
        raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
      if not message.get('content'):
        raise RequestError('messages must contain content')
      if images := message.get('images'):
        message['images'] = [_encode_image(image) for image in images]

    return await self._request_stream(
      'POST',
      '/api/chat',
      json={
        'model': model,
        'messages': messages,
        'stream': stream,
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  async def embeddings(
    self,
    model: str = '',
    prompt: str = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Sequence[float]:
    """Return the embedding vector for `prompt` computed by `model`."""
    response = await self._request(
      'POST',
      '/api/embeddings',
      json={
        'model': model,
        'prompt': prompt,
        'options': options or {},
        'keep_alive': keep_alive,
      },
    )

    return response.json()

  async def pull(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request_stream(
      'POST',
      '/api/pull',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  async def push(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request_stream(
      'POST',
      '/api/push',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  async def create(
    self,
    model: str,
    path: Optional[Union[str, PathLike]] = None,
    modelfile: Optional[str] = None,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    if (realpath := _as_path(path)) and realpath.exists():
      modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
    elif modelfile:
      modelfile = await self._parse_modelfile(modelfile)
    else:
      raise RequestError('must provide either path or modelfile')

    return await self._request_stream(
      'POST',
      '/api/create',
      json={
        'name': model,
        'modelfile': modelfile,
        'stream': stream,
      },
      stream=stream,
    )

  async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
    """Rewrite FROM/ADAPTER file references in a Modelfile as blob digests."""
    base = Path.cwd() if base is None else base

    out = io.StringIO()
    for line in io.StringIO(modelfile):
      command, _, args = line.partition(' ')
      if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue

      path = Path(args.strip()).expanduser()
      path = path if path.is_absolute() else base / path
      if path.exists():
        # Replace the local path with an uploaded blob reference.
        args = f'@{await self._create_blob(path)}\n'
      print(command, args, end='', file=out)

    return out.getvalue()

  async def _create_blob(self, path: Union[str, Path]) -> str:
    """Upload the file at `path` as a blob (if missing) and return its digest."""
    sha256sum = sha256()
    with open(path, 'rb') as r:
      while True:
        chunk = r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    try:
      # HEAD checks whether the server already has this blob.
      await self._request('HEAD', f'/api/blobs/{digest}')
    except ResponseError as e:
      if e.status_code != 404:
        raise

      async def upload_bytes():
        with open(path, 'rb') as r:
          while True:
            chunk = r.read(32 * 1024)
            if not chunk:
              break
            yield chunk

      await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())

    return digest

  async def delete(self, model: str) -> Mapping[str, Any]:
    """Delete `model` from the server."""
    response = await self._request('DELETE', '/api/delete', json={'name': model})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  async def list(self) -> Mapping[str, Any]:
    """List the models available on the server."""
    response = await self._request('GET', '/api/tags')
    return response.json()

  async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
    """Copy the model named `source` to `destination`."""
    response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  async def show(self, model: str) -> Mapping[str, Any]:
    """Return details (modelfile, parameters, template, ...) for `model`."""
    response = await self._request('POST', '/api/show', json={'name': model})
    return response.json()


def _encode_image(image) -> str:
  """
  Normalize an image argument (raw bytes, base64 text, a file path, or a
  file-like object) into a base64-encoded string.

  NOTE: the former doctests that encoded the repository's LICENSE file were
  removed -- they depended on a file outside this module and failed whenever
  the tests ran from a different working directory.

  >>> _encode_image(b'ollama')
  'b2xsYW1h'
  >>> _encode_image(io.BytesIO(b'ollama'))
  'b2xsYW1h'
  >>> _encode_image('YWJj')
  'YWJj'
  >>> _encode_image(b'YWJj')
  'YWJj'
  """

  if p := _as_path(image):
    # The argument is a path to an existing file: read and encode its bytes.
    return b64encode(p.read_bytes()).decode('utf-8')

  try:
    b64decode(image, validate=True)
    # Already valid base64: pass through, decoding bytes to str if needed.
    return image if isinstance(image, str) else image.decode('utf-8')
  except (binascii.Error, TypeError):
    ...

  if b := _as_bytesio(image):
    return b64encode(b.read()).decode('utf-8')

  raise RequestError('image must be bytes, path-like object, or file-like object')


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
  """Return `s` as a `Path` if it names an existing file, else None."""
  if isinstance(s, str) or isinstance(s, Path):
    try:
      if (p := Path(s)).exists():
        return p
    except Exception:
      ...
  return None


def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
  """Return `s` as a readable `BytesIO` if possible, else None."""
  if isinstance(s, io.BytesIO):
    return s
  elif isinstance(s, bytes):
    return io.BytesIO(s)
  return None


def _parse_host(host: Optional[str]) -> str:
  """
  >>> _parse_host(None)
  'http://127.0.0.1:11434'
  >>> _parse_host('')
  'http://127.0.0.1:11434'
  >>> _parse_host('1.2.3.4')
  'http://1.2.3.4:11434'
  >>> _parse_host(':56789')
  'http://127.0.0.1:56789'
  >>> _parse_host('1.2.3.4:56789')
  'http://1.2.3.4:56789'
  >>> _parse_host('http://1.2.3.4')
  'http://1.2.3.4:80'
  >>> _parse_host('https://1.2.3.4')
  'https://1.2.3.4:443'
  >>> _parse_host('https://1.2.3.4:56789')
  'https://1.2.3.4:56789'
  >>> _parse_host('example.com')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789')
  'http://example.com:56789'
  >>> _parse_host('http://example.com')
  'http://example.com:80'
  >>> _parse_host('https://example.com')
  'https://example.com:443'
  >>> _parse_host('https://example.com:56789')
  'https://example.com:56789'
  >>> _parse_host('example.com/')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789/')
  'http://example.com:56789'
  """

  host, port = host or '', 11434
  scheme, _, hostport = host.partition('://')
  if not hostport:
    # No scheme given: default to http on the standard Ollama port.
    scheme, hostport = 'http', host
  elif scheme == 'http':
    port = 80
  elif scheme == 'https':
    port = 443

  split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
  host = split.hostname or '127.0.0.1'
  port = split.port or port

  return f'{scheme}://{host}:{port}'
evocodebench_data_210
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256

from base64 import b64encode, b64decode

from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal

import sys

if sys.version_info < (3, 9):
    from typing import Iterator, AsyncIterator
else:
    from collections.abc import Iterator, AsyncIterator

from importlib import metadata

try:
    __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
    # Running from a source checkout without installed metadata.
    __version__ = '0.0.0'

from ollama._types import Message, Options, RequestError, ResponseError


class BaseClient:
    """Shared construction logic for the sync and async clients."""

    def __init__(
        self,
        client,
        host: Optional[str] = None,
        follow_redirects: bool = True,
        timeout: Any = None,
        **kwargs,
    ) -> None:
        """
        Creates a httpx client. Default parameters are the same as those defined in httpx
        except for the following:
        - `follow_redirects`: True
        - `timeout`: None

        `kwargs` are passed to the httpx client.
        """
        headers = kwargs.pop('headers', {})
        # Force JSON content negotiation and identify the client library.
        headers['Content-Type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'

        self._client = client(
            base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
            follow_redirects=follow_redirects,
            timeout=timeout,
            headers=headers,
            **kwargs,
        )


class Client(BaseClient):
    """Synchronous client for the Ollama HTTP API."""

    def __init__(self, host: Optional[str] = None, **kwargs) -> None:
        super().__init__(httpx.Client, host, **kwargs)

    def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        """Issue a request and convert HTTP errors into `ResponseError`."""
        response = self._client.request(method, url, **kwargs)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            raise ResponseError(e.response.text, e.response.status_code) from None

        return response

    def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
        """Yield parsed JSON objects from a newline-delimited streaming response."""
        with self._client.stream(method, url, **kwargs) as r:
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                # The body has not been read yet in streaming mode.
                e.response.read()
                raise ResponseError(e.response.text, e.response.status_code) from None

            for line in r.iter_lines():
                partial = json.loads(line)
                # The server reports mid-stream failures as an 'error' field.
                if e := partial.get('error'):
                    raise ResponseError(e)
                yield partial

    def _request_stream(
        self,
        *args,
        stream: bool = False,
        **kwargs,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """Dispatch to `_stream` or `_request` depending on `stream`."""
        return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()

    def generate(
        self,
        model: str = '',
        prompt: str = '',
        system: str = '',
        template: str = '',
        context: Optional[Sequence[int]] = None,
        stream: bool = False,
        raw: bool = False,
        format: Literal['', 'json'] = '',
        images: Optional[Sequence[AnyStr]] = None,
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Create a response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        return self._request_stream(
            'POST',
            '/api/generate',
            json={
                'model': model,
                'prompt': prompt,
                'system': system,
                'template': template,
                'context': context or [],
                'stream': stream,
                'raw': raw,
                'images': [_encode_image(image) for image in images or []],
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    def chat(
        self,
        model: str = '',
        messages: Optional[Sequence[Message]] = None,
        stream: bool = False,
        format: Literal['', 'json'] = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Create a chat response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        for message in messages or []:
            if not isinstance(message, dict):
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
            if not message.get('content'):
                raise RequestError('messages must contain content')
            if images := message.get('images'):
                message['images'] = [_encode_image(image) for image in images]

        return self._request_stream(
            'POST',
            '/api/chat',
            json={
                'model': model,
                'messages': messages,
                'stream': stream,
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    def embeddings(
        self,
        model: str = '',
        prompt: str = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Sequence[float]:
        """Return the embedding vector for `prompt` using `model`."""
        return self._request(
            'POST',
            '/api/embeddings',
            json={
                'model': model,
                'prompt': prompt,
                'options': options or {},
                'keep_alive': keep_alive,
            },
        ).json()

    def pull(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return self._request_stream(
            'POST',
            '/api/pull',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    def push(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return self._request_stream(
            'POST',
            '/api/push',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    def create(
        self,
        model: str,
        path: Optional[Union[str, PathLike]] = None,
        modelfile: Optional[str] = None,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        if (realpath := _as_path(path)) and realpath.exists():
            modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
        elif modelfile:
            modelfile = self._parse_modelfile(modelfile)
        else:
            raise RequestError('must provide either path or modelfile')

        return self._request_stream(
            'POST',
            '/api/create',
            json={
                'name': model,
                'modelfile': modelfile,
                'stream': stream,
            },
            stream=stream,
        )

    def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
        """Rewrite FROM/ADAPTER file paths in a modelfile to uploaded blob digests."""
        base = Path.cwd() if base is None else base

        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue

            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                args = f'@{self._create_blob(path)}\n'
            print(command, args, end='', file=out)

        return out.getvalue()

    def _create_blob(self, path: Union[str, Path]) -> str:
        """Upload the file at `path` as a blob (if absent) and return its digest."""
        sha256sum = sha256()
        with open(path, 'rb') as r:
            while True:
                chunk = r.read(32 * 1024)
                if not chunk:
                    break
                sha256sum.update(chunk)

        digest = f'sha256:{sha256sum.hexdigest()}'

        try:
            # Skip the upload when the server already holds this blob.
            self._request('HEAD', f'/api/blobs/{digest}')
        except ResponseError as e:
            if e.status_code != 404:
                raise

            with open(path, 'rb') as r:
                self._request('POST', f'/api/blobs/{digest}', content=r)

        return digest

    def delete(self, model: str) -> Mapping[str, Any]:
        """Delete `model`; returns a status mapping."""
        response = self._request('DELETE', '/api/delete', json={'name': model})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    def list(self) -> Mapping[str, Any]:
        """List models available on the server."""
        return self._request('GET', '/api/tags').json()

    def copy(self, source: str, destination: str) -> Mapping[str, Any]:
        """Copy model `source` to `destination`; returns a status mapping."""
        response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    def show(self, model: str) -> Mapping[str, Any]:
        """Return details for `model`."""
        return self._request('POST', '/api/show', json={'name': model}).json()


class AsyncClient(BaseClient):
    """Asynchronous client for the Ollama HTTP API."""

    def __init__(self, host: Optional[str] = None, **kwargs) -> None:
        super().__init__(httpx.AsyncClient, host, **kwargs)

    async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        """Issue a request and convert HTTP errors into `ResponseError`."""
        response = await self._client.request(method, url, **kwargs)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            raise ResponseError(e.response.text, e.response.status_code) from None

        return response

    async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
        """Yield parsed JSON objects from a newline-delimited streaming response."""
        async def inner():
            async with self._client.stream(method, url, **kwargs) as r:
                try:
                    r.raise_for_status()
                except httpx.HTTPStatusError as e:
                    # The body has not been read yet in streaming mode.
                    e.response.read()
                    raise ResponseError(e.response.text, e.response.status_code) from None

                async for line in r.aiter_lines():
                    partial = json.loads(line)
                    # The server reports mid-stream failures as an 'error' field.
                    if e := partial.get('error'):
                        raise ResponseError(e)
                    yield partial

        return inner()

    async def _request_stream(
        self,
        *args,
        stream: bool = False,
        **kwargs,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """Dispatch to `_stream` or `_request` depending on `stream`."""
        if stream:
            return await self._stream(*args, **kwargs)

        response = await self._request(*args, **kwargs)
        return response.json()

    async def generate(
        self,
        model: str = '',
        prompt: str = '',
        system: str = '',
        template: str = '',
        context: Optional[Sequence[int]] = None,
        stream: bool = False,
        raw: bool = False,
        format: Literal['', 'json'] = '',
        images: Optional[Sequence[AnyStr]] = None,
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Create a response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        return await self._request_stream(
            'POST',
            '/api/generate',
            json={
                'model': model,
                'prompt': prompt,
                'system': system,
                'template': template,
                'context': context or [],
                'stream': stream,
                'raw': raw,
                'images': [_encode_image(image) for image in images or []],
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    async def chat(
        self,
        model: str = '',
        messages: Optional[Sequence[Message]] = None,
        stream: bool = False,
        format: Literal['', 'json'] = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Create a chat response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        for message in messages or []:
            if not isinstance(message, dict):
                # Fixed: message text now matches Client.chat and describes the
                # actual expectation (dict-like objects, not strings).
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
            if not message.get('content'):
                raise RequestError('messages must contain content')
            if images := message.get('images'):
                message['images'] = [_encode_image(image) for image in images]

        return await self._request_stream(
            'POST',
            '/api/chat',
            json={
                'model': model,
                'messages': messages,
                'stream': stream,
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    async def embeddings(
        self,
        model: str = '',
        prompt: str = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Sequence[float]:
        """Return the embedding vector for `prompt` using `model`."""
        response = await self._request(
            'POST',
            '/api/embeddings',
            json={
                'model': model,
                'prompt': prompt,
                'options': options or {},
                'keep_alive': keep_alive,
            },
        )
        return response.json()

    async def pull(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return await self._request_stream(
            'POST',
            '/api/pull',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    async def push(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return await self._request_stream(
            'POST',
            '/api/push',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    async def create(
        self,
        model: str,
        path: Optional[Union[str, PathLike]] = None,
        modelfile: Optional[str] = None,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        if (realpath := _as_path(path)) and realpath.exists():
            modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
        elif modelfile:
            modelfile = await self._parse_modelfile(modelfile)
        else:
            raise RequestError('must provide either path or modelfile')

        return await self._request_stream(
            'POST',
            '/api/create',
            json={
                'name': model,
                'modelfile': modelfile,
                'stream': stream,
            },
            stream=stream,
        )

    async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
        """Rewrite FROM/ADAPTER file paths in a modelfile to uploaded blob digests."""
        base = Path.cwd() if base is None else base

        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue

            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                args = f'@{await self._create_blob(path)}\n'
            print(command, args, end='', file=out)

        return out.getvalue()

    async def _create_blob(self, path: Union[str, Path]) -> str:
        """Upload the file at `path` as a blob (if absent) and return its digest."""
        # NOTE(review): file reads here are synchronous and will block the
        # event loop for large files — matches upstream behavior.
        sha256sum = sha256()
        with open(path, 'rb') as r:
            while True:
                chunk = r.read(32 * 1024)
                if not chunk:
                    break
                sha256sum.update(chunk)

        digest = f'sha256:{sha256sum.hexdigest()}'

        try:
            # Skip the upload when the server already holds this blob.
            await self._request('HEAD', f'/api/blobs/{digest}')
        except ResponseError as e:
            if e.status_code != 404:
                raise

            async def upload_bytes():
                with open(path, 'rb') as r:
                    while True:
                        chunk = r.read(32 * 1024)
                        if not chunk:
                            break
                        yield chunk

            await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())

        return digest

    async def delete(self, model: str) -> Mapping[str, Any]:
        """Delete `model`; returns a status mapping."""
        response = await self._request('DELETE', '/api/delete', json={'name': model})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    async def list(self) -> Mapping[str, Any]:
        """List models available on the server."""
        response = await self._request('GET', '/api/tags')
        return response.json()

    async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
        """Copy model `source` to `destination`; returns a status mapping."""
        response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    async def show(self, model: str) -> Mapping[str, Any]:
        """Return details for `model`."""
        response = await self._request('POST', '/api/show', json={'name': model})
        return response.json()


def _encode_image(image) -> str:
    """
    Return *image* as a base64-encoded string.

    Accepts raw bytes, a file-like object, a path to a file, or a string/bytes
    value that is already valid base64 (returned unchanged). Note that a string
    which names an existing file is read and encoded, even if it is also valid
    base64.

    >>> _encode_image(b'ollama')
    'b2xsYW1h'
    >>> _encode_image(io.BytesIO(b'ollama'))
    'b2xsYW1h'
    >>> _encode_image('YWJj')
    'YWJj'
    >>> _encode_image(b'YWJj')
    'YWJj'
    """
    if p := _as_path(image):
        return b64encode(p.read_bytes()).decode('utf-8')

    try:
        # Already base64? Pass it through untouched.
        b64decode(image, validate=True)
        return image if isinstance(image, str) else image.decode('utf-8')
    except (binascii.Error, TypeError):
        ...

    if b := _as_bytesio(image):
        return b64encode(b.read()).decode('utf-8')

    raise RequestError('image must be bytes, path-like object, or file-like object')


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
    """Return `s` as a `Path` if it names an existing file, else None."""
    if isinstance(s, str) or isinstance(s, Path):
        try:
            if (p := Path(s)).exists():
                return p
        except Exception:
            # e.g. embedded NUL bytes or over-long strings raise from Path/exists.
            ...
    return None


def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
    """Return `s` as a readable BytesIO, or None if it is not bytes-like."""
    if isinstance(s, io.BytesIO):
        return s
    elif isinstance(s, bytes):
        return io.BytesIO(s)
    return None


def _parse_host(host: Optional[str]) -> str:
    """
    Normalize a host string to `scheme://host:port`.

    Defaults: scheme `http`, host `127.0.0.1`, port 11434 (80/443 when an
    explicit http/https scheme is given without a port).

    >>> _parse_host(None)
    'http://127.0.0.1:11434'
    >>> _parse_host('')
    'http://127.0.0.1:11434'
    >>> _parse_host('1.2.3.4')
    'http://1.2.3.4:11434'
    >>> _parse_host(':56789')
    'http://127.0.0.1:56789'
    >>> _parse_host('1.2.3.4:56789')
    'http://1.2.3.4:56789'
    >>> _parse_host('http://1.2.3.4')
    'http://1.2.3.4:80'
    >>> _parse_host('https://1.2.3.4')
    'https://1.2.3.4:443'
    >>> _parse_host('https://1.2.3.4:56789')
    'https://1.2.3.4:56789'
    >>> _parse_host('example.com')
    'http://example.com:11434'
    >>> _parse_host('example.com:56789')
    'http://example.com:56789'
    >>> _parse_host('http://example.com')
    'http://example.com:80'
    >>> _parse_host('https://example.com')
    'https://example.com:443'
    >>> _parse_host('https://example.com:56789')
    'https://example.com:56789'
    >>> _parse_host('example.com/')
    'http://example.com:11434'
    >>> _parse_host('example.com:56789/')
    'http://example.com:56789'
    """
    host, port = host or '', 11434
    scheme, _, hostport = host.partition('://')
    if not hostport:
        scheme, hostport = 'http', host
    elif scheme == 'http':
        port = 80
    elif scheme == 'https':
        port = 443

    split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
    host = split.hostname or '127.0.0.1'
    port = split.port or port

    return f'{scheme}://{host}:{port}'
evocodebench_data_211
import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """ headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except 
httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if 
response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, 
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', 
content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIE
VWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if 
isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}'
evocodebench_data_212
import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal

import sys

# collections.abc generics became subscriptable in Python 3.9; fall back to
# the (deprecated) typing aliases on older interpreters.
if sys.version_info < (3, 9):
    from typing import Iterator, AsyncIterator
else:
    from collections.abc import Iterator, AsyncIterator

from importlib import metadata

try:
    __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
    # Package metadata is unavailable (e.g. running from a source checkout).
    __version__ = '0.0.0'

from ollama._types import Message, Options, RequestError, ResponseError


class BaseClient:
    """Shared construction logic for the sync and async Ollama clients."""

    def __init__(
        self,
        client,
        host: Optional[str] = None,
        follow_redirects: bool = True,
        timeout: Any = None,
        **kwargs,
    ) -> None:
        """
        Creates a httpx client. Default parameters are the same as those defined in httpx
        except for the following:
        - `follow_redirects`: True
        - `timeout`: None

        `kwargs` are passed to the httpx client.
        """
        # Force JSON + identify ourselves; caller-supplied headers are kept
        # except for these three, which the API requires.
        headers = kwargs.pop('headers', {})
        headers['Content-Type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'

        self._client = client(
            base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
            follow_redirects=follow_redirects,
            timeout=timeout,
            headers=headers,
            **kwargs,
        )


class Client(BaseClient):
    """Synchronous client for the Ollama HTTP API."""

    def __init__(self, host: Optional[str] = None, **kwargs) -> None:
        super().__init__(httpx.Client, host, **kwargs)

    def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        """Issue a single request; convert HTTP errors into ResponseError."""
        response = self._client.request(method, url, **kwargs)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            raise ResponseError(e.response.text, e.response.status_code) from None

        return response

    def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
        """Yield newline-delimited JSON objects from a streaming response.

        Raises ResponseError on an HTTP error status or when a streamed
        object carries an 'error' field.
        """
        with self._client.stream(method, url, **kwargs) as r:
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                # The body of a streamed response must be read before
                # `.text` is accessible.
                e.response.read()
                raise ResponseError(e.response.text, e.response.status_code) from None

            for line in r.iter_lines():
                partial = json.loads(line)
                # The server reports mid-stream failures inline.
                if e := partial.get('error'):
                    raise ResponseError(e)
                yield partial

    def _request_stream(
        self,
        *args,
        stream: bool = False,
        **kwargs,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """Dispatch to `_stream` or `_request` depending on `stream`."""
        return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()

    def generate(
        self,
        model: str = '',
        prompt: str = '',
        system: str = '',
        template: str = '',
        context: Optional[Sequence[int]] = None,
        stream: bool = False,
        raw: bool = False,
        format: Literal['', 'json'] = '',
        images: Optional[Sequence[AnyStr]] = None,
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Create a response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        return self._request_stream(
            'POST',
            '/api/generate',
            json={
                'model': model,
                'prompt': prompt,
                'system': system,
                'template': template,
                'context': context or [],
                'stream': stream,
                'raw': raw,
                'images': [_encode_image(image) for image in images or []],
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    def chat(
        self,
        model: str = '',
        messages: Optional[Sequence[Message]] = None,
        stream: bool = False,
        format: Literal['', 'json'] = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Create a chat response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        # Validate message structure up-front and base64-encode any images
        # in place so the payload is JSON-serializable.
        for message in messages or []:
            if not isinstance(message, dict):
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
            if not message.get('content'):
                raise RequestError('messages must contain content')
            if images := message.get('images'):
                message['images'] = [_encode_image(image) for image in images]

        return self._request_stream(
            'POST',
            '/api/chat',
            json={
                'model': model,
                'messages': messages,
                'stream': stream,
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    def embeddings(
        self,
        model: str = '',
        prompt: str = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Sequence[float]:
        """Return the embedding vector for `prompt` computed by `model`."""
        return self._request(
            'POST',
            '/api/embeddings',
            json={
                'model': model,
                'prompt': prompt,
                'options': options or {},
                'keep_alive': keep_alive,
            },
        ).json()

    def pull(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return self._request_stream(
            'POST',
            '/api/pull',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    def push(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return self._request_stream(
            'POST',
            '/api/push',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    def create(
        self,
        model: str,
        path: Optional[Union[str, PathLike]] = None,
        modelfile: Optional[str] = None,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        # Accept either a Modelfile path or its literal contents; the path
        # wins when both are provided, and relative FROM/ADAPTER paths inside
        # it are resolved against the Modelfile's own directory.
        if (realpath := _as_path(path)) and realpath.exists():
            modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
        elif modelfile:
            modelfile = self._parse_modelfile(modelfile)
        else:
            raise RequestError('must provide either path or modelfile')

        return self._request_stream(
            'POST',
            '/api/create',
            json={
                'name': model,
                'modelfile': modelfile,
                'stream': stream,
            },
            stream=stream,
        )

    def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
        """Rewrite FROM/ADAPTER lines whose argument is a local file into
        `@<digest>` blob references, uploading each blob as needed."""
        base = Path.cwd() if base is None else base

        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue

            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                args = f'@{self._create_blob(path)}\n'
            print(command, args, end='', file=out)

        return out.getvalue()

    def _create_blob(self, path: Union[str, Path]) -> str:
        """Upload the file at `path` as a content-addressed blob and return
        its `sha256:` digest; skips the upload if the server already has it."""
        sha256sum = sha256()
        with open(path, 'rb') as r:
            while True:
                chunk = r.read(32 * 1024)
                if not chunk:
                    break
                sha256sum.update(chunk)

        digest = f'sha256:{sha256sum.hexdigest()}'

        try:
            # HEAD succeeds iff the server already has this blob.
            self._request('HEAD', f'/api/blobs/{digest}')
        except ResponseError as e:
            if e.status_code != 404:
                raise

            with open(path, 'rb') as r:
                self._request('POST', f'/api/blobs/{digest}', content=r)

        return digest

    def delete(self, model: str) -> Mapping[str, Any]:
        """Delete `model` from the server."""
        response = self._request('DELETE', '/api/delete', json={'name': model})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    def list(self) -> Mapping[str, Any]:
        """Return the models available on the server."""
        return self._request('GET', '/api/tags').json()

    def copy(self, source: str, destination: str) -> Mapping[str, Any]:
        """Copy model `source` to a new name `destination`."""
        response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    def show(self, model: str) -> Mapping[str, Any]:
        """Return detailed information about `model`."""
        return self._request('POST', '/api/show', json={'name': model}).json()


class AsyncClient(BaseClient):
    """Asynchronous client for the Ollama HTTP API (mirrors `Client`)."""

    def __init__(self, host: Optional[str] = None, **kwargs) -> None:
        super().__init__(httpx.AsyncClient, host, **kwargs)

    async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        """Issue a single request; convert HTTP errors into ResponseError."""
        response = await self._client.request(method, url, **kwargs)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            raise ResponseError(e.response.text, e.response.status_code) from None

        return response

    async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
        """Yield newline-delimited JSON objects from a streaming response.

        Raises ResponseError on an HTTP error status or when a streamed
        object carries an 'error' field.
        """
        async def inner():
            async with self._client.stream(method, url, **kwargs) as r:
                try:
                    r.raise_for_status()
                except httpx.HTTPStatusError as e:
                    # FIX: the response is streamed asynchronously, so the
                    # body must be read with the async `aread()`; the
                    # synchronous `read()` used previously raises RuntimeError
                    # on an async streamed response, masking the real error.
                    await e.response.aread()
                    raise ResponseError(e.response.text, e.response.status_code) from None

                async for line in r.aiter_lines():
                    partial = json.loads(line)
                    # The server reports mid-stream failures inline.
                    if e := partial.get('error'):
                        raise ResponseError(e)
                    yield partial

        return inner()

    async def _request_stream(
        self,
        *args,
        stream: bool = False,
        **kwargs,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """Dispatch to `_stream` or `_request` depending on `stream`."""
        if stream:
            return await self._stream(*args, **kwargs)

        response = await self._request(*args, **kwargs)
        return response.json()

    async def generate(
        self,
        model: str = '',
        prompt: str = '',
        system: str = '',
        template: str = '',
        context: Optional[Sequence[int]] = None,
        stream: bool = False,
        raw: bool = False,
        format: Literal['', 'json'] = '',
        images: Optional[Sequence[AnyStr]] = None,
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Create a response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        return await self._request_stream(
            'POST',
            '/api/generate',
            json={
                'model': model,
                'prompt': prompt,
                'system': system,
                'template': template,
                'context': context or [],
                'stream': stream,
                'raw': raw,
                'images': [_encode_image(image) for image in images or []],
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    async def chat(
        self,
        model: str = '',
        messages: Optional[Sequence[Message]] = None,
        stream: bool = False,
        format: Literal['', 'json'] = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Create a chat response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        # Validate message structure up-front and base64-encode any images
        # in place so the payload is JSON-serializable.
        for message in messages or []:
            if not isinstance(message, dict):
                # FIX: message text previously said 'a list of strings',
                # which contradicted both the check and the sync client.
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
            if not message.get('content'):
                raise RequestError('messages must contain content')
            if images := message.get('images'):
                message['images'] = [_encode_image(image) for image in images]

        return await self._request_stream(
            'POST',
            '/api/chat',
            json={
                'model': model,
                'messages': messages,
                'stream': stream,
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    async def embeddings(
        self,
        model: str = '',
        prompt: str = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Sequence[float]:
        """Return the embedding vector for `prompt` computed by `model`."""
        response = await self._request(
            'POST',
            '/api/embeddings',
            json={
                'model': model,
                'prompt': prompt,
                'options': options or {},
                'keep_alive': keep_alive,
            },
        )

        return response.json()

    async def pull(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return await self._request_stream(
            'POST',
            '/api/pull',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    async def push(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return await self._request_stream(
            'POST',
            '/api/push',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    async def create(
        self,
        model: str,
        path: Optional[Union[str, PathLike]] = None,
        modelfile: Optional[str] = None,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        # Accept either a Modelfile path or its literal contents; the path
        # wins when both are provided, and relative FROM/ADAPTER paths inside
        # it are resolved against the Modelfile's own directory.
        if (realpath := _as_path(path)) and realpath.exists():
            modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
        elif modelfile:
            modelfile = await self._parse_modelfile(modelfile)
        else:
            raise RequestError('must provide either path or modelfile')

        return await self._request_stream(
            'POST',
            '/api/create',
            json={
                'name': model,
                'modelfile': modelfile,
                'stream': stream,
            },
            stream=stream,
        )

    async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
        """Rewrite FROM/ADAPTER lines whose argument is a local file into
        `@<digest>` blob references, uploading each blob as needed."""
        base = Path.cwd() if base is None else base

        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue

            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                args = f'@{await self._create_blob(path)}\n'
            print(command, args, end='', file=out)

        return out.getvalue()

    async def _create_blob(self, path: Union[str, Path]) -> str:
        """Upload the file at `path` as a content-addressed blob and return
        its `sha256:` digest; skips the upload if the server already has it."""
        sha256sum = sha256()
        with open(path, 'rb') as r:
            while True:
                chunk = r.read(32 * 1024)
                if not chunk:
                    break
                sha256sum.update(chunk)

        digest = f'sha256:{sha256sum.hexdigest()}'

        try:
            # HEAD succeeds iff the server already has this blob.
            await self._request('HEAD', f'/api/blobs/{digest}')
        except ResponseError as e:
            if e.status_code != 404:
                raise

            async def upload_bytes():
                # Chunked generator lets httpx stream the upload without
                # loading the whole file into memory.
                with open(path, 'rb') as r:
                    while True:
                        chunk = r.read(32 * 1024)
                        if not chunk:
                            break
                        yield chunk

            await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())

        return digest

    async def delete(self, model: str) -> Mapping[str, Any]:
        """Delete `model` from the server."""
        response = await self._request('DELETE', '/api/delete', json={'name': model})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    async def list(self) -> Mapping[str, Any]:
        """Return the models available on the server."""
        response = await self._request('GET', '/api/tags')
        return response.json()

    async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
        """Copy model `source` to a new name `destination`."""
        response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    async def show(self, model: str) -> Mapping[str, Any]:
        """Return detailed information about `model`."""
        response = await self._request('POST', '/api/show', json={'name': model})
        return response.json()


def _encode_image(image) -> str:
    """
    Return `image` as a base64-encoded string.

    Accepts raw bytes, a path (str or Path) to an existing file whose contents
    are encoded, a `BytesIO` file-like object, or a str/bytes value that is
    already valid base64 (returned as-is, decoded to str if necessary).

    Raises `RequestError` for any other input.

    >>> _encode_image(b'ollama')
    'b2xsYW1h'
    >>> _encode_image(io.BytesIO(b'ollama'))
    'b2xsYW1h'
    >>> _encode_image('YWJj')
    'YWJj'
    >>> _encode_image(b'YWJj')
    'YWJj'
    """
    if p := _as_path(image):
        return b64encode(p.read_bytes()).decode('utf-8')

    try:
        # Already base64? Pass it through unchanged.
        b64decode(image, validate=True)
        return image if isinstance(image, str) else image.decode('utf-8')
    except (binascii.Error, TypeError):
        ...

    if b := _as_bytesio(image):
        return b64encode(b.read()).decode('utf-8')

    raise RequestError('image must be bytes, path-like object, or file-like object')


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
    """Return `s` as a Path if it names an existing file, else None."""
    if isinstance(s, str) or isinstance(s, Path):
        try:
            if (p := Path(s)).exists():
                return p
        except Exception:
            # e.g. the string is too long or malformed to be a path
            ...
    return None


def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
    """Return `s` as a BytesIO if it is one (or raw bytes), else None."""
    if isinstance(s, io.BytesIO):
        return s
    elif isinstance(s, bytes):
        return io.BytesIO(s)
    return None


def _parse_host(host: Optional[str]) -> str:
    """
    Normalize `host` into a full `scheme://host:port` base URL.

    Defaults: scheme http, host 127.0.0.1, port 11434 (80/443 when an explicit
    http/https scheme is given without a port).

    >>> _parse_host(None)
    'http://127.0.0.1:11434'
    >>> _parse_host('')
    'http://127.0.0.1:11434'
    >>> _parse_host('1.2.3.4')
    'http://1.2.3.4:11434'
    >>> _parse_host(':56789')
    'http://127.0.0.1:56789'
    >>> _parse_host('1.2.3.4:56789')
    'http://1.2.3.4:56789'
    >>> _parse_host('http://1.2.3.4')
    'http://1.2.3.4:80'
    >>> _parse_host('https://1.2.3.4')
    'https://1.2.3.4:443'
    >>> _parse_host('https://1.2.3.4:56789')
    'https://1.2.3.4:56789'
    >>> _parse_host('example.com')
    'http://example.com:11434'
    >>> _parse_host('example.com:56789')
    'http://example.com:56789'
    >>> _parse_host('http://example.com')
    'http://example.com:80'
    >>> _parse_host('https://example.com')
    'https://example.com:443'
    >>> _parse_host('https://example.com:56789')
    'https://example.com:56789'
    >>> _parse_host('example.com/')
    'http://example.com:11434'
    >>> _parse_host('example.com:56789/')
    'http://example.com:56789'
    """
    host, port = host or '', 11434
    scheme, _, hostport = host.partition('://')
    if not hostport:
        scheme, hostport = 'http', host
    elif scheme == 'http':
        port = 80
    elif scheme == 'https':
        port = 443

    split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
    host = split.hostname or '127.0.0.1'
    port = split.port or port

    return f'{scheme}://{host}:{port}'
evocodebench_data_213
import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """ headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except 
httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if 
response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, 
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', 
content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIE
VWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if 
isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}'
evocodebench_data_214
import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """ headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except 
httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if 
response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, 
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', 
content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIE
VWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if 
isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}'
evocodebench_data_215
import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """ headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except 
httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if 
response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, 
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', 
content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIE
VWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if 
isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}'
evocodebench_data_216
"""HTTP client bindings for the Ollama API.

Provides a synchronous ``Client`` and an ``AsyncClient`` built on httpx,
plus module-level helpers for image encoding and host URL parsing.
"""

import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal

import sys

# collections.abc Iterator/AsyncIterator became subscriptable in 3.9;
# older interpreters must use the typing aliases.
if sys.version_info < (3, 9):
  from typing import Iterator, AsyncIterator
else:
  from collections.abc import Iterator, AsyncIterator

from importlib import metadata

try:
  __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
  # Distribution metadata unavailable (e.g. running from a source checkout).
  __version__ = '0.0.0'

from ollama._types import Message, Options, RequestError, ResponseError


class BaseClient:
  """Shared construction logic for the sync and async clients."""

  def __init__(
    self,
    client,
    host: Optional[str] = None,
    follow_redirects: bool = True,
    timeout: Any = None,
    **kwargs,
  ) -> None:
    """
    Creates a httpx client. Default parameters are the same as those defined in httpx
    except for the following:
    - `follow_redirects`: True
    - `timeout`: None

    `kwargs` are passed to the httpx client.
    """
    # Caller-supplied headers are kept, but the JSON content negotiation
    # headers and the User-Agent are always set by this library.
    headers = kwargs.pop('headers', {})
    headers['Content-Type'] = 'application/json'
    headers['Accept'] = 'application/json'
    headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'

    self._client = client(
      # Explicit `host` wins over the OLLAMA_HOST environment variable.
      base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
      follow_redirects=follow_redirects,
      timeout=timeout,
      headers=headers,
      **kwargs,
    )


class Client(BaseClient):
  """Synchronous client for the Ollama HTTP API."""

  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.Client, host, **kwargs)

  def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
    """Issue a request and convert HTTP errors into `ResponseError`."""
    response = self._client.request(method, url, **kwargs)

    try:
      response.raise_for_status()
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None

    return response

  def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
    """Yield newline-delimited JSON objects from a streaming response."""
    with self._client.stream(method, url, **kwargs) as r:
      try:
        r.raise_for_status()
      except httpx.HTTPStatusError as e:
        # The body of a streaming response must be read before `.text`
        # is available.
        e.response.read()
        raise ResponseError(e.response.text, e.response.status_code) from None

      for line in r.iter_lines():
        partial = json.loads(line)
        # The server reports mid-stream failures as an `error` field.
        if e := partial.get('error'):
          raise ResponseError(e)
        yield partial

  def _request_stream(
    self,
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """Dispatch to a streaming generator or a one-shot JSON request."""
    return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()

  def generate(
    self,
    model: str = '',
    prompt: str = '',
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: bool = False,
    format: Literal['', 'json'] = '',
    images: Optional[Sequence[AnyStr]] = None,
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    return self._request_stream(
      'POST',
      '/api/generate',
      json={
        'model': model,
        'prompt': prompt,
        'system': system,
        'template': template,
        'context': context or [],
        'stream': stream,
        'raw': raw,
        'images': [_encode_image(image) for image in images or []],
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Message]] = None,
    stream: bool = False,
    format: Literal['', 'json'] = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a chat response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    # Validate message shape client-side and base64-encode any images
    # in place before sending.
    for message in messages or []:
      if not isinstance(message, dict):
        raise TypeError('messages must be a list of Message or dict-like objects')
      if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
        raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
      if not message.get('content'):
        raise RequestError('messages must contain content')
      if images := message.get('images'):
        message['images'] = [_encode_image(image) for image in images]

    return self._request_stream(
      'POST',
      '/api/chat',
      json={
        'model': model,
        'messages': messages,
        'stream': stream,
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  def embeddings(
    self,
    model: str = '',
    prompt: str = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Sequence[float]:
    """Return the embedding vector for `prompt` using `model`."""
    return self._request(
      'POST',
      '/api/embeddings',
      json={
        'model': model,
        'prompt': prompt,
        'options': options or {},
        'keep_alive': keep_alive,
      },
    ).json()

  def pull(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request_stream(
      'POST',
      '/api/pull',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  def push(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request_stream(
      'POST',
      '/api/push',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  def create(
    self,
    model: str,
    path: Optional[Union[str, PathLike]] = None,
    modelfile: Optional[str] = None,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    # A modelfile path takes precedence over inline modelfile text.
    if (realpath := _as_path(path)) and realpath.exists():
      modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
    elif modelfile:
      modelfile = self._parse_modelfile(modelfile)
    else:
      raise RequestError('must provide either path or modelfile')

    return self._request_stream(
      'POST',
      '/api/create',
      json={
        'name': model,
        'modelfile': modelfile,
        'stream': stream,
      },
      stream=stream,
    )

  def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
    """Rewrite FROM/ADAPTER file references as uploaded blob digests."""
    base = Path.cwd() if base is None else base

    out = io.StringIO()
    for line in io.StringIO(modelfile):
      command, _, args = line.partition(' ')
      if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue

      # Relative paths are resolved against the modelfile's directory.
      path = Path(args.strip()).expanduser()
      path = path if path.is_absolute() else base / path
      if path.exists():
        args = f'@{self._create_blob(path)}\n'
      print(command, args, end='', file=out)

    return out.getvalue()

  def _create_blob(self, path: Union[str, Path]) -> str:
    """Upload `path` as a content-addressed blob; return its sha256 digest."""
    sha256sum = sha256()
    with open(path, 'rb') as r:
      while True:
        chunk = r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    try:
      # HEAD succeeds when the server already has the blob.
      self._request('HEAD', f'/api/blobs/{digest}')
    except ResponseError as e:
      if e.status_code != 404:
        raise

      with open(path, 'rb') as r:
        self._request('POST', f'/api/blobs/{digest}', content=r)

    return digest

  def delete(self, model: str) -> Mapping[str, Any]:
    """Delete `model`; non-2xx responses raise `ResponseError` in `_request`."""
    response = self._request('DELETE', '/api/delete', json={'name': model})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  def list(self) -> Mapping[str, Any]:
    """Return the locally available models."""
    return self._request('GET', '/api/tags').json()

  def copy(self, source: str, destination: str) -> Mapping[str, Any]:
    """Copy model `source` to `destination`."""
    response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  def show(self, model: str) -> Mapping[str, Any]:
    """Return details (modelfile, parameters, template, ...) for `model`."""
    return self._request('POST', '/api/show', json={'name': model}).json()


class AsyncClient(BaseClient):
  """Asynchronous client for the Ollama HTTP API; mirrors `Client`."""

  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.AsyncClient, host, **kwargs)

  async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
    """Issue a request and convert HTTP errors into `ResponseError`."""
    response = await self._client.request(method, url, **kwargs)

    try:
      response.raise_for_status()
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None

    return response

  async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
    """Return an async generator of newline-delimited JSON objects."""
    async def inner():
      async with self._client.stream(method, url, **kwargs) as r:
        try:
          r.raise_for_status()
        except httpx.HTTPStatusError as e:
          # BUGFIX: an async streaming response must be drained with
          # `aread()`; the sync `read()` raises RuntimeError here and
          # masked the server's actual error message.
          await e.response.aread()
          raise ResponseError(e.response.text, e.response.status_code) from None

        async for line in r.aiter_lines():
          partial = json.loads(line)
          # The server reports mid-stream failures as an `error` field.
          if e := partial.get('error'):
            raise ResponseError(e)
          yield partial

    return inner()

  async def _request_stream(
    self,
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """Dispatch to a streaming async generator or a one-shot JSON request."""
    if stream:
      return await self._stream(*args, **kwargs)
    response = await self._request(*args, **kwargs)
    return response.json()

  async def generate(
    self,
    model: str = '',
    prompt: str = '',
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: bool = False,
    format: Literal['', 'json'] = '',
    images: Optional[Sequence[AnyStr]] = None,
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    return await self._request_stream(
      'POST',
      '/api/generate',
      json={
        'model': model,
        'prompt': prompt,
        'system': system,
        'template': template,
        'context': context or [],
        'stream': stream,
        'raw': raw,
        'images': [_encode_image(image) for image in images or []],
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  async def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Message]] = None,
    stream: bool = False,
    format: Literal['', 'json'] = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a chat response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    # Validate message shape client-side and base64-encode any images
    # in place before sending.
    for message in messages or []:
      if not isinstance(message, dict):
        # CONSISTENCY FIX: the sync client's message is accurate; the
        # async copy incorrectly said 'a list of strings'.
        raise TypeError('messages must be a list of Message or dict-like objects')
      if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
        raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
      if not message.get('content'):
        raise RequestError('messages must contain content')
      if images := message.get('images'):
        message['images'] = [_encode_image(image) for image in images]

    return await self._request_stream(
      'POST',
      '/api/chat',
      json={
        'model': model,
        'messages': messages,
        'stream': stream,
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  async def embeddings(
    self,
    model: str = '',
    prompt: str = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Sequence[float]:
    """Return the embedding vector for `prompt` using `model`."""
    response = await self._request(
      'POST',
      '/api/embeddings',
      json={
        'model': model,
        'prompt': prompt,
        'options': options or {},
        'keep_alive': keep_alive,
      },
    )
    return response.json()

  async def pull(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request_stream(
      'POST',
      '/api/pull',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  async def push(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request_stream(
      'POST',
      '/api/push',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  async def create(
    self,
    model: str,
    path: Optional[Union[str, PathLike]] = None,
    modelfile: Optional[str] = None,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    # A modelfile path takes precedence over inline modelfile text.
    if (realpath := _as_path(path)) and realpath.exists():
      modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
    elif modelfile:
      modelfile = await self._parse_modelfile(modelfile)
    else:
      raise RequestError('must provide either path or modelfile')

    return await self._request_stream(
      'POST',
      '/api/create',
      json={
        'name': model,
        'modelfile': modelfile,
        'stream': stream,
      },
      stream=stream,
    )

  async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
    """Rewrite FROM/ADAPTER file references as uploaded blob digests."""
    base = Path.cwd() if base is None else base

    out = io.StringIO()
    for line in io.StringIO(modelfile):
      command, _, args = line.partition(' ')
      if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue

      # Relative paths are resolved against the modelfile's directory.
      path = Path(args.strip()).expanduser()
      path = path if path.is_absolute() else base / path
      if path.exists():
        args = f'@{await self._create_blob(path)}\n'
      print(command, args, end='', file=out)

    return out.getvalue()

  async def _create_blob(self, path: Union[str, Path]) -> str:
    """Upload `path` as a content-addressed blob; return its sha256 digest."""
    sha256sum = sha256()
    with open(path, 'rb') as r:
      while True:
        chunk = r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    try:
      # HEAD succeeds when the server already has the blob.
      await self._request('HEAD', f'/api/blobs/{digest}')
    except ResponseError as e:
      if e.status_code != 404:
        raise

      async def upload_bytes():
        # Stream the file in chunks so large blobs are never fully
        # resident in memory.
        with open(path, 'rb') as r:
          while True:
            chunk = r.read(32 * 1024)
            if not chunk:
              break
            yield chunk

      await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())

    return digest

  async def delete(self, model: str) -> Mapping[str, Any]:
    """Delete `model`; non-2xx responses raise `ResponseError` in `_request`."""
    response = await self._request('DELETE', '/api/delete', json={'name': model})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  async def list(self) -> Mapping[str, Any]:
    """Return the locally available models."""
    response = await self._request('GET', '/api/tags')
    return response.json()

  async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
    """Copy model `source` to `destination`."""
    response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  async def show(self, model: str) -> Mapping[str, Any]:
    """Return details (modelfile, parameters, template, ...) for `model`."""
    response = await self._request('POST', '/api/show', json={'name': model})
    return response.json()


def _encode_image(image) -> str:
  """Normalize an image argument to a base64 string.

  Accepts an existing file path, raw bytes, a BytesIO, or an
  already-base64-encoded str/bytes (returned unchanged, decoded to str).

  Raises `RequestError` for any other input.

  >>> _encode_image(b'ollama')
  'b2xsYW1h'
  >>> _encode_image(io.BytesIO(b'ollama'))
  'b2xsYW1h'
  >>> _encode_image('YWJj')
  'YWJj'
  >>> _encode_image(b'YWJj')
  'YWJj'
  """
  if p := _as_path(image):
    return b64encode(p.read_bytes()).decode('utf-8')

  try:
    # Already base64-encoded input passes through untouched.
    b64decode(image, validate=True)
    return image if isinstance(image, str) else image.decode('utf-8')
  except (binascii.Error, TypeError):
    ...

  if b := _as_bytesio(image):
    return b64encode(b.read()).decode('utf-8')

  raise RequestError('image must be bytes, path-like object, or file-like object')


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
  """Return `s` as a `Path` if it names an existing file, else None."""
  if isinstance(s, str) or isinstance(s, Path):
    try:
      if (p := Path(s)).exists():
        return p
    except Exception:
      # e.g. a very long base64 string raises OSError from exists().
      ...
  return None


def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
  """Return `s` as a `BytesIO` if it is bytes-like, else None."""
  if isinstance(s, io.BytesIO):
    return s
  elif isinstance(s, bytes):
    return io.BytesIO(s)
  return None


def _parse_host(host: Optional[str]) -> str:
  """Normalize a host spec to a `scheme://host:port` base URL.

  Defaults: scheme http, host 127.0.0.1, port 11434 (80/443 when an
  http/https scheme is given without an explicit port).

  >>> _parse_host(None)
  'http://127.0.0.1:11434'
  >>> _parse_host('')
  'http://127.0.0.1:11434'
  >>> _parse_host('1.2.3.4')
  'http://1.2.3.4:11434'
  >>> _parse_host(':56789')
  'http://127.0.0.1:56789'
  >>> _parse_host('1.2.3.4:56789')
  'http://1.2.3.4:56789'
  >>> _parse_host('http://1.2.3.4')
  'http://1.2.3.4:80'
  >>> _parse_host('https://1.2.3.4')
  'https://1.2.3.4:443'
  >>> _parse_host('https://1.2.3.4:56789')
  'https://1.2.3.4:56789'
  >>> _parse_host('example.com')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789')
  'http://example.com:56789'
  >>> _parse_host('http://example.com')
  'http://example.com:80'
  >>> _parse_host('https://example.com')
  'https://example.com:443'
  >>> _parse_host('https://example.com:56789')
  'https://example.com:56789'
  >>> _parse_host('example.com/')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789/')
  'http://example.com:56789'
  """
  host, port = host or '', 11434
  scheme, _, hostport = host.partition('://')
  if not hostport:
    scheme, hostport = 'http', host
  elif scheme == 'http':
    port = 80
  elif scheme == 'https':
    port = 443

  split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
  host = split.hostname or '127.0.0.1'
  port = split.port or port

  return f'{scheme}://{host}:{port}'
evocodebench_data_217
import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """ headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except 
httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if 
response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, 
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', 
content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIE
VWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if 
isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}'
evocodebench_data_218
import binascii
import io
import json
import os
import platform
import sys
import urllib.parse
from base64 import b64decode, b64encode
from hashlib import sha256
from importlib import metadata
from os import PathLike
from pathlib import Path
from typing import Any, AnyStr, Literal, Mapping, Optional, Sequence, Union

import httpx

if sys.version_info < (3, 9):
    # collections.abc generics are not subscriptable before Python 3.9.
    from typing import AsyncIterator, Iterator
else:
    from collections.abc import AsyncIterator, Iterator

try:
    __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
    # Package metadata is unavailable when running from a source checkout.
    __version__ = '0.0.0'

from ollama._types import Message, Options, RequestError, ResponseError


class BaseClient:
    """Shared construction logic for the synchronous and asynchronous clients.

    Builds the underlying httpx client with Ollama's defaults and a
    descriptive User-Agent header.
    """

    def __init__(
        self,
        client,
        host: Optional[str] = None,
        follow_redirects: bool = True,
        timeout: Any = None,
        **kwargs,
    ) -> None:
        """
        Creates a httpx client. Default parameters are the same as those defined
        in httpx except for the following:
        - `follow_redirects`: True
        - `timeout`: None

        `kwargs` are passed to the httpx client.
        """
        # Force JSON headers; any user-supplied headers dict is merged first.
        headers = kwargs.pop('headers', {})
        headers['Content-Type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'

        self._client = client(
            base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
            follow_redirects=follow_redirects,
            timeout=timeout,
            headers=headers,
            **kwargs,
        )


class Client(BaseClient):
    """Synchronous client for the Ollama HTTP API."""

    def __init__(self, host: Optional[str] = None, **kwargs) -> None:
        super().__init__(httpx.Client, host, **kwargs)

    def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        """Perform one request, converting HTTP errors to `ResponseError`."""
        response = self._client.request(method, url, **kwargs)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            raise ResponseError(e.response.text, e.response.status_code) from None

        return response

    def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
        """Yield one parsed JSON object per line of a streaming response.

        Raises `ResponseError` for HTTP errors and for `error` objects
        embedded in the stream itself.
        """
        with self._client.stream(method, url, **kwargs) as r:
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                # In streaming mode the body has not been read yet; read it so
                # the error text is available.
                e.response.read()
                raise ResponseError(e.response.text, e.response.status_code) from None

            for line in r.iter_lines():
                partial = json.loads(line)
                if e := partial.get('error'):
                    raise ResponseError(e)
                yield partial

    def _request_stream(
        self,
        *args,
        stream: bool = False,
        **kwargs,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """Dispatch to `_stream` or a plain JSON request depending on `stream`."""
        return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()

    def generate(
        self,
        model: str = '',
        prompt: str = '',
        system: str = '',
        template: str = '',
        context: Optional[Sequence[int]] = None,
        stream: bool = False,
        raw: bool = False,
        format: Literal['', 'json'] = '',
        images: Optional[Sequence[AnyStr]] = None,
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Create a response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        return self._request_stream(
            'POST',
            '/api/generate',
            json={
                'model': model,
                'prompt': prompt,
                'system': system,
                'template': template,
                'context': context or [],
                'stream': stream,
                'raw': raw,
                'images': [_encode_image(image) for image in images or []],
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    def chat(
        self,
        model: str = '',
        messages: Optional[Sequence[Message]] = None,
        stream: bool = False,
        format: Literal['', 'json'] = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Create a chat response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        # Validate each message and base64-encode any attached images in place.
        for message in messages or []:
            if not isinstance(message, dict):
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
            if not message.get('content'):
                raise RequestError('messages must contain content')
            if images := message.get('images'):
                message['images'] = [_encode_image(image) for image in images]

        return self._request_stream(
            'POST',
            '/api/chat',
            json={
                'model': model,
                'messages': messages,
                'stream': stream,
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    def embeddings(
        self,
        model: str = '',
        prompt: str = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Sequence[float]:
        """Return the embedding vector for `prompt` computed by `model`."""
        return self._request(
            'POST',
            '/api/embeddings',
            json={
                'model': model,
                'prompt': prompt,
                'options': options or {},
                'keep_alive': keep_alive,
            },
        ).json()

    def pull(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return self._request_stream(
            'POST',
            '/api/pull',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    def push(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return self._request_stream(
            'POST',
            '/api/push',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    def create(
        self,
        model: str,
        path: Optional[Union[str, PathLike]] = None,
        modelfile: Optional[str] = None,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        # A file path takes precedence over an inline modelfile string.
        if (realpath := _as_path(path)) and realpath.exists():
            modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
        elif modelfile:
            modelfile = self._parse_modelfile(modelfile)
        else:
            raise RequestError('must provide either path or modelfile')

        return self._request_stream(
            'POST',
            '/api/create',
            json={
                'name': model,
                'modelfile': modelfile,
                'stream': stream,
            },
            stream=stream,
        )

    def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
        """Rewrite FROM/ADAPTER lines that reference local files as blob digests.

        Paths are resolved relative to `base` (default: the current working
        directory); referenced files are uploaded via `_create_blob`.
        """
        base = Path.cwd() if base is None else base

        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue

            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                args = f'@{self._create_blob(path)}\n'
            print(command, args, end='', file=out)

        return out.getvalue()

    def _create_blob(self, path: Union[str, Path]) -> str:
        """Upload the file at `path` as a blob and return its sha256 digest."""
        sha256sum = sha256()
        with open(path, 'rb') as r:
            while True:
                chunk = r.read(32 * 1024)
                if not chunk:
                    break
                sha256sum.update(chunk)

        digest = f'sha256:{sha256sum.hexdigest()}'

        try:
            self._request('HEAD', f'/api/blobs/{digest}')
        except ResponseError as e:
            # 404 just means the blob does not exist yet and must be uploaded.
            if e.status_code != 404:
                raise

        with open(path, 'rb') as r:
            self._request('POST', f'/api/blobs/{digest}', content=r)

        return digest

    def delete(self, model: str) -> Mapping[str, Any]:
        """Delete `model`; returns a simple status mapping."""
        response = self._request('DELETE', '/api/delete', json={'name': model})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    def list(self) -> Mapping[str, Any]:
        """List models available locally."""
        return self._request('GET', '/api/tags').json()

    def copy(self, source: str, destination: str) -> Mapping[str, Any]:
        """Copy model `source` to `destination`; returns a status mapping."""
        response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    def show(self, model: str) -> Mapping[str, Any]:
        """Return details for `model`."""
        return self._request('POST', '/api/show', json={'name': model}).json()


class AsyncClient(BaseClient):
    """Asynchronous client for the Ollama HTTP API (mirrors `Client`)."""

    def __init__(self, host: Optional[str] = None, **kwargs) -> None:
        super().__init__(httpx.AsyncClient, host, **kwargs)

    async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
        """Perform one request, converting HTTP errors to `ResponseError`."""
        response = await self._client.request(method, url, **kwargs)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            raise ResponseError(e.response.text, e.response.status_code) from None

        return response

    async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
        """Yield one parsed JSON object per line of a streaming response."""

        async def inner():
            async with self._client.stream(method, url, **kwargs) as r:
                try:
                    r.raise_for_status()
                except httpx.HTTPStatusError as e:
                    # FIX: async responses must be read with `aread()`; the
                    # previous synchronous `read()` call raises on an async
                    # stream instead of surfacing the server's error text.
                    await e.response.aread()
                    raise ResponseError(e.response.text, e.response.status_code) from None

                async for line in r.aiter_lines():
                    partial = json.loads(line)
                    if e := partial.get('error'):
                        raise ResponseError(e)
                    yield partial

        return inner()

    async def _request_stream(
        self,
        *args,
        stream: bool = False,
        **kwargs,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """Dispatch to `_stream` or a plain JSON request depending on `stream`."""
        if stream:
            return await self._stream(*args, **kwargs)

        response = await self._request(*args, **kwargs)
        return response.json()

    async def generate(
        self,
        model: str = '',
        prompt: str = '',
        system: str = '',
        template: str = '',
        context: Optional[Sequence[int]] = None,
        stream: bool = False,
        raw: bool = False,
        format: Literal['', 'json'] = '',
        images: Optional[Sequence[AnyStr]] = None,
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Create a response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        return await self._request_stream(
            'POST',
            '/api/generate',
            json={
                'model': model,
                'prompt': prompt,
                'system': system,
                'template': template,
                'context': context or [],
                'stream': stream,
                'raw': raw,
                'images': [_encode_image(image) for image in images or []],
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    async def chat(
        self,
        model: str = '',
        messages: Optional[Sequence[Message]] = None,
        stream: bool = False,
        format: Literal['', 'json'] = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Create a chat response using the requested model.

        Raises `RequestError` if a model is not provided.

        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
        """
        if not model:
            raise RequestError('must provide a model')

        # Validate each message and base64-encode any attached images in place.
        for message in messages or []:
            if not isinstance(message, dict):
                # FIX: message text made consistent with the sync Client.chat;
                # it previously said "list of strings", which is wrong.
                raise TypeError('messages must be a list of Message or dict-like objects')
            if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
                raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
            if not message.get('content'):
                raise RequestError('messages must contain content')
            if images := message.get('images'):
                message['images'] = [_encode_image(image) for image in images]

        return await self._request_stream(
            'POST',
            '/api/chat',
            json={
                'model': model,
                'messages': messages,
                'stream': stream,
                'format': format,
                'options': options or {},
                'keep_alive': keep_alive,
            },
            stream=stream,
        )

    async def embeddings(
        self,
        model: str = '',
        prompt: str = '',
        options: Optional[Options] = None,
        keep_alive: Optional[Union[float, str]] = None,
    ) -> Sequence[float]:
        """Return the embedding vector for `prompt` computed by `model`."""
        response = await self._request(
            'POST',
            '/api/embeddings',
            json={
                'model': model,
                'prompt': prompt,
                'options': options or {},
                'keep_alive': keep_alive,
            },
        )
        return response.json()

    async def pull(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return await self._request_stream(
            'POST',
            '/api/pull',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    async def push(
        self,
        model: str,
        insecure: bool = False,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        return await self._request_stream(
            'POST',
            '/api/push',
            json={
                'name': model,
                'insecure': insecure,
                'stream': stream,
            },
            stream=stream,
        )

    async def create(
        self,
        model: str,
        path: Optional[Union[str, PathLike]] = None,
        modelfile: Optional[str] = None,
        stream: bool = False,
    ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
        """
        Raises `ResponseError` if the request could not be fulfilled.

        Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
        """
        # A file path takes precedence over an inline modelfile string.
        if (realpath := _as_path(path)) and realpath.exists():
            modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
        elif modelfile:
            modelfile = await self._parse_modelfile(modelfile)
        else:
            raise RequestError('must provide either path or modelfile')

        return await self._request_stream(
            'POST',
            '/api/create',
            json={
                'name': model,
                'modelfile': modelfile,
                'stream': stream,
            },
            stream=stream,
        )

    async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
        """Rewrite FROM/ADAPTER lines that reference local files as blob digests."""
        base = Path.cwd() if base is None else base

        out = io.StringIO()
        for line in io.StringIO(modelfile):
            command, _, args = line.partition(' ')
            if command.upper() not in ['FROM', 'ADAPTER']:
                print(line, end='', file=out)
                continue

            path = Path(args.strip()).expanduser()
            path = path if path.is_absolute() else base / path
            if path.exists():
                args = f'@{await self._create_blob(path)}\n'
            print(command, args, end='', file=out)

        return out.getvalue()

    async def _create_blob(self, path: Union[str, Path]) -> str:
        """Upload the file at `path` as a blob and return its sha256 digest."""
        sha256sum = sha256()
        with open(path, 'rb') as r:
            while True:
                chunk = r.read(32 * 1024)
                if not chunk:
                    break
                sha256sum.update(chunk)

        digest = f'sha256:{sha256sum.hexdigest()}'

        try:
            await self._request('HEAD', f'/api/blobs/{digest}')
        except ResponseError as e:
            # 404 just means the blob does not exist yet and must be uploaded.
            if e.status_code != 404:
                raise

        async def upload_bytes():
            # Chunked generator so large files are streamed, not read whole.
            with open(path, 'rb') as r:
                while True:
                    chunk = r.read(32 * 1024)
                    if not chunk:
                        break
                    yield chunk

        await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())

        return digest

    async def delete(self, model: str) -> Mapping[str, Any]:
        """Delete `model`; returns a simple status mapping."""
        response = await self._request('DELETE', '/api/delete', json={'name': model})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    async def list(self) -> Mapping[str, Any]:
        """List models available locally."""
        response = await self._request('GET', '/api/tags')
        return response.json()

    async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
        """Copy model `source` to `destination`; returns a status mapping."""
        response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
        return {'status': 'success' if response.status_code == 200 else 'error'}

    async def show(self, model: str) -> Mapping[str, Any]:
        """Return details for `model`."""
        response = await self._request('POST', '/api/show', json={'name': model})
        return response.json()


def _encode_image(image) -> str:
    """Normalize an image argument to a base64 string.

    Accepts raw bytes, a file-like object, a path to a file, or an
    already-base64-encoded str/bytes (returned as-is, decoded to str).

    NOTE: the original docstring also carried doctests reading the repo's
    LICENSE file; they were dropped because they depend on the working
    directory's contents and are not reproducible.

    >>> _encode_image(b'ollama')
    'b2xsYW1h'
    >>> _encode_image(io.BytesIO(b'ollama'))
    'b2xsYW1h'
    >>> _encode_image('YWJj')
    'YWJj'
    >>> _encode_image(b'YWJj')
    'YWJj'
    """
    if p := _as_path(image):
        return b64encode(p.read_bytes()).decode('utf-8')

    try:
        # Already valid base64? Pass it through unchanged.
        b64decode(image, validate=True)
        return image if isinstance(image, str) else image.decode('utf-8')
    except (binascii.Error, TypeError):
        ...

    if b := _as_bytesio(image):
        return b64encode(b.read()).decode('utf-8')

    raise RequestError('image must be bytes, path-like object, or file-like object')


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
    """Return `s` as a `Path` if it names an existing file, else `None`."""
    if isinstance(s, (str, Path)):
        try:
            if (p := Path(s)).exists():
                return p
        except Exception:
            # e.g. the string is too long or contains NULs to be a valid path
            ...
    return None


def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
    """Return `s` as a readable `BytesIO` if possible, else `None`."""
    if isinstance(s, io.BytesIO):
        return s
    elif isinstance(s, bytes):
        return io.BytesIO(s)
    return None


def _parse_host(host: Optional[str]) -> str:
    """Normalize a host specification to `scheme://host:port`.

    Defaults: scheme `http`, host `127.0.0.1`, port 11434 (80/443 when an
    explicit http/https scheme is given without a port).

    >>> _parse_host(None)
    'http://127.0.0.1:11434'
    >>> _parse_host('')
    'http://127.0.0.1:11434'
    >>> _parse_host('1.2.3.4')
    'http://1.2.3.4:11434'
    >>> _parse_host(':56789')
    'http://127.0.0.1:56789'
    >>> _parse_host('1.2.3.4:56789')
    'http://1.2.3.4:56789'
    >>> _parse_host('http://1.2.3.4')
    'http://1.2.3.4:80'
    >>> _parse_host('https://1.2.3.4')
    'https://1.2.3.4:443'
    >>> _parse_host('https://1.2.3.4:56789')
    'https://1.2.3.4:56789'
    >>> _parse_host('example.com')
    'http://example.com:11434'
    >>> _parse_host('example.com:56789')
    'http://example.com:56789'
    >>> _parse_host('http://example.com')
    'http://example.com:80'
    >>> _parse_host('https://example.com')
    'https://example.com:443'
    >>> _parse_host('https://example.com:56789')
    'https://example.com:56789'
    >>> _parse_host('example.com/')
    'http://example.com:11434'
    >>> _parse_host('example.com:56789/')
    'http://example.com:56789'
    """
    host, port = host or '', 11434
    scheme, _, hostport = host.partition('://')
    if not hostport:
        # No scheme given: the whole string is the host[:port].
        scheme, hostport = 'http', host
    elif scheme == 'http':
        port = 80
    elif scheme == 'https':
        port = 443

    split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
    host = split.hostname or '127.0.0.1'
    port = split.port or port

    return f'{scheme}://{host}:{port}'
evocodebench_data_219
"""HTTP client for the Ollama API.

Provides a synchronous `Client` and an asynchronous `AsyncClient`, both thin
wrappers over httpx that speak Ollama's JSON / newline-delimited-JSON protocol,
plus helpers for base64 image encoding, Modelfile parsing, and host URL
normalization.
"""

import os
import io
import json
import httpx
import binascii
import platform
import urllib.parse
from os import PathLike
from pathlib import Path
from hashlib import sha256
from base64 import b64encode, b64decode
from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal

import sys

# collections.abc generics are only subscriptable from 3.9 onwards.
if sys.version_info < (3, 9):
  from typing import Iterator, AsyncIterator
else:
  from collections.abc import Iterator, AsyncIterator

from importlib import metadata

try:
  __version__ = metadata.version('ollama')
except metadata.PackageNotFoundError:
  # Running from a source tree without installed package metadata.
  __version__ = '0.0.0'

from ollama._types import Message, Options, RequestError, ResponseError


class BaseClient:
  """Shared construction logic for `Client` and `AsyncClient`."""

  def __init__(
    self,
    client,
    host: Optional[str] = None,
    follow_redirects: bool = True,
    timeout: Any = None,
    **kwargs,
  ) -> None:
    """
    Creates a httpx client. Default parameters are the same as those defined in httpx
    except for the following:

    - `follow_redirects`: True
    - `timeout`: None

    `kwargs` are passed to the httpx client.
    """
    # Caller-supplied headers are kept, but content negotiation and the
    # User-Agent are always set by this library.
    headers = kwargs.pop('headers', {})
    headers['Content-Type'] = 'application/json'
    headers['Accept'] = 'application/json'
    headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}'

    # `client` is the httpx class to instantiate (httpx.Client or httpx.AsyncClient).
    self._client = client(
      base_url=_parse_host(host or os.getenv('OLLAMA_HOST')),
      follow_redirects=follow_redirects,
      timeout=timeout,
      headers=headers,
      **kwargs,
    )


class Client(BaseClient):
  """Synchronous Ollama API client."""

  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.Client, host, **kwargs)

  def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
    """Send one request; translate HTTP errors into `ResponseError`."""
    response = self._client.request(method, url, **kwargs)

    try:
      response.raise_for_status()
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None

    return response

  def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]:
    """Yield parsed objects from a newline-delimited JSON response stream."""
    with self._client.stream(method, url, **kwargs) as r:
      try:
        r.raise_for_status()
      except httpx.HTTPStatusError as e:
        # The body of a streamed response must be read before .text is available.
        e.response.read()
        raise ResponseError(e.response.text, e.response.status_code) from None

      for line in r.iter_lines():
        partial = json.loads(line)
        # The server can report an error mid-stream inside a JSON object.
        if e := partial.get('error'):
          raise ResponseError(e)
        yield partial

  def _request_stream(
    self,
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """Dispatch to streaming or non-streaming request handling."""
    return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json()

  def generate(
    self,
    model: str = '',
    prompt: str = '',
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: bool = False,
    format: Literal['', 'json'] = '',
    images: Optional[Sequence[AnyStr]] = None,
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    return self._request_stream(
      'POST',
      '/api/generate',
      json={
        'model': model,
        'prompt': prompt,
        'system': system,
        'template': template,
        'context': context or [],
        'stream': stream,
        'raw': raw,
        # Images are normalized to base64 strings regardless of input form.
        'images': [_encode_image(image) for image in images or []],
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Message]] = None,
    stream: bool = False,
    format: Literal['', 'json'] = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a chat response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    # Validate message shape before sending anything over the wire.
    for message in messages or []:
      if not isinstance(message, dict):
        raise TypeError('messages must be a list of Message or dict-like objects')
      if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
        raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
      if not message.get('content'):
        raise RequestError('messages must contain content')
      if images := message.get('images'):
        # Encodes in place: the caller's message dict is mutated.
        message['images'] = [_encode_image(image) for image in images]

    return self._request_stream(
      'POST',
      '/api/chat',
      json={
        'model': model,
        'messages': messages,
        'stream': stream,
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  def embeddings(
    self,
    model: str = '',
    prompt: str = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Sequence[float]:
    """Return the embedding vector for `prompt` using `model`."""
    return self._request(
      'POST',
      '/api/embeddings',
      json={
        'model': model,
        'prompt': prompt,
        'options': options or {},
        'keep_alive': keep_alive,
      },
    ).json()

  def pull(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request_stream(
      'POST',
      '/api/pull',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  def push(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return self._request_stream(
      'POST',
      '/api/push',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  def create(
    self,
    model: str,
    path: Optional[Union[str, PathLike]] = None,
    modelfile: Optional[str] = None,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]:
    """
    Create a model from a Modelfile given either as a path or as text.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    # A path on disk takes precedence over inline modelfile text.
    if (realpath := _as_path(path)) and realpath.exists():
      modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent)
    elif modelfile:
      modelfile = self._parse_modelfile(modelfile)
    else:
      raise RequestError('must provide either path or modelfile')

    return self._request_stream(
      'POST',
      '/api/create',
      json={
        'name': model,
        'modelfile': modelfile,
        'stream': stream,
      },
      stream=stream,
    )

  def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
    """Rewrite FROM/ADAPTER file references in a Modelfile to uploaded blob digests.

    Lines other than FROM/ADAPTER pass through unchanged; relative paths are
    resolved against `base` (defaulting to the current working directory).
    """
    base = Path.cwd() if base is None else base

    out = io.StringIO()
    for line in io.StringIO(modelfile):
      command, _, args = line.partition(' ')
      if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue

      path = Path(args.strip()).expanduser()
      path = path if path.is_absolute() else base / path
      if path.exists():
        # Replace the local path with an @sha256:... blob reference.
        args = f'@{self._create_blob(path)}\n'
      print(command, args, end='', file=out)

    return out.getvalue()

  def _create_blob(self, path: Union[str, Path]) -> str:
    """Upload the file at `path` as a blob (if not already present) and return its digest."""
    # Hash the file in 32 KiB chunks to avoid loading it fully into memory.
    sha256sum = sha256()
    with open(path, 'rb') as r:
      while True:
        chunk = r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    try:
      # HEAD probe: only upload when the server does not already have the blob.
      self._request('HEAD', f'/api/blobs/{digest}')
    except ResponseError as e:
      if e.status_code != 404:
        raise

      with open(path, 'rb') as r:
        self._request('POST', f'/api/blobs/{digest}', content=r)

    return digest

  def delete(self, model: str) -> Mapping[str, Any]:
    """Delete `model`; returns a {'status': ...} mapping rather than raising on non-200."""
    response = self._request('DELETE', '/api/delete', json={'name': model})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  def list(self) -> Mapping[str, Any]:
    """List locally available models."""
    return self._request('GET', '/api/tags').json()

  def copy(self, source: str, destination: str) -> Mapping[str, Any]:
    """Copy model `source` to `destination`; returns a {'status': ...} mapping."""
    response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  def show(self, model: str) -> Mapping[str, Any]:
    """Return details (modelfile, parameters, template, ...) for `model`."""
    return self._request('POST', '/api/show', json={'name': model}).json()


class AsyncClient(BaseClient):
  """Asynchronous Ollama API client; mirrors `Client` method-for-method."""

  def __init__(self, host: Optional[str] = None, **kwargs) -> None:
    super().__init__(httpx.AsyncClient, host, **kwargs)

  async def _request(self, method: str, url: str, **kwargs) -> httpx.Response:
    """Send one request; translate HTTP errors into `ResponseError`."""
    response = await self._client.request(method, url, **kwargs)

    try:
      response.raise_for_status()
    except httpx.HTTPStatusError as e:
      raise ResponseError(e.response.text, e.response.status_code) from None

    return response

  async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]:
    """Return an async generator over a newline-delimited JSON response stream."""
    async def inner():
      async with self._client.stream(method, url, **kwargs) as r:
        try:
          r.raise_for_status()
        except httpx.HTTPStatusError as e:
          # NOTE(review): sync read() on an httpx async response — aread() is
          # the async counterpart; confirm this does not raise at runtime.
          e.response.read()
          raise ResponseError(e.response.text, e.response.status_code) from None

        async for line in r.aiter_lines():
          partial = json.loads(line)
          # The server can report an error mid-stream inside a JSON object.
          if e := partial.get('error'):
            raise ResponseError(e)
          yield partial

    return inner()

  async def _request_stream(
    self,
    *args,
    stream: bool = False,
    **kwargs,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """Dispatch to streaming or non-streaming request handling."""
    if stream:
      return await self._stream(*args, **kwargs)
    response = await self._request(*args, **kwargs)
    return response.json()

  async def generate(
    self,
    model: str = '',
    prompt: str = '',
    system: str = '',
    template: str = '',
    context: Optional[Sequence[int]] = None,
    stream: bool = False,
    raw: bool = False,
    format: Literal['', 'json'] = '',
    images: Optional[Sequence[AnyStr]] = None,
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    return await self._request_stream(
      'POST',
      '/api/generate',
      json={
        'model': model,
        'prompt': prompt,
        'system': system,
        'template': template,
        'context': context or [],
        'stream': stream,
        'raw': raw,
        # Images are normalized to base64 strings regardless of input form.
        'images': [_encode_image(image) for image in images or []],
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  async def chat(
    self,
    model: str = '',
    messages: Optional[Sequence[Message]] = None,
    stream: bool = False,
    format: Literal['', 'json'] = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a chat response using the requested model.

    Raises `RequestError` if a model is not provided.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator.
    """
    if not model:
      raise RequestError('must provide a model')

    # Validate message shape before sending anything over the wire.
    for message in messages or []:
      if not isinstance(message, dict):
        # NOTE(review): message text says 'list of strings' but dict-like
        # objects are expected — inconsistent with Client.chat; fix message
        # wording in a follow-up change.
        raise TypeError('messages must be a list of strings')
      if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']:
        raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"')
      if not message.get('content'):
        raise RequestError('messages must contain content')
      if images := message.get('images'):
        # Encodes in place: the caller's message dict is mutated.
        message['images'] = [_encode_image(image) for image in images]

    return await self._request_stream(
      'POST',
      '/api/chat',
      json={
        'model': model,
        'messages': messages,
        'stream': stream,
        'format': format,
        'options': options or {},
        'keep_alive': keep_alive,
      },
      stream=stream,
    )

  async def embeddings(
    self,
    model: str = '',
    prompt: str = '',
    options: Optional[Options] = None,
    keep_alive: Optional[Union[float, str]] = None,
  ) -> Sequence[float]:
    """Return the embedding vector for `prompt` using `model`."""
    response = await self._request(
      'POST',
      '/api/embeddings',
      json={
        'model': model,
        'prompt': prompt,
        'options': options or {},
        'keep_alive': keep_alive,
      },
    )
    return response.json()

  async def pull(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request_stream(
      'POST',
      '/api/pull',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  async def push(
    self,
    model: str,
    insecure: bool = False,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    return await self._request_stream(
      'POST',
      '/api/push',
      json={
        'name': model,
        'insecure': insecure,
        'stream': stream,
      },
      stream=stream,
    )

  async def create(
    self,
    model: str,
    path: Optional[Union[str, PathLike]] = None,
    modelfile: Optional[str] = None,
    stream: bool = False,
  ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]:
    """
    Create a model from a Modelfile given either as a path or as text.

    Raises `ResponseError` if the request could not be fulfilled.

    Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator.
    """
    # A path on disk takes precedence over inline modelfile text.
    if (realpath := _as_path(path)) and realpath.exists():
      modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent)
    elif modelfile:
      modelfile = await self._parse_modelfile(modelfile)
    else:
      raise RequestError('must provide either path or modelfile')

    return await self._request_stream(
      'POST',
      '/api/create',
      json={
        'name': model,
        'modelfile': modelfile,
        'stream': stream,
      },
      stream=stream,
    )

  async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str:
    """Rewrite FROM/ADAPTER file references in a Modelfile to uploaded blob digests.

    Lines other than FROM/ADAPTER pass through unchanged; relative paths are
    resolved against `base` (defaulting to the current working directory).
    """
    base = Path.cwd() if base is None else base

    out = io.StringIO()
    for line in io.StringIO(modelfile):
      command, _, args = line.partition(' ')
      if command.upper() not in ['FROM', 'ADAPTER']:
        print(line, end='', file=out)
        continue

      path = Path(args.strip()).expanduser()
      path = path if path.is_absolute() else base / path
      if path.exists():
        # Replace the local path with an @sha256:... blob reference.
        args = f'@{await self._create_blob(path)}\n'
      print(command, args, end='', file=out)

    return out.getvalue()

  async def _create_blob(self, path: Union[str, Path]) -> str:
    """Upload the file at `path` as a blob (if not already present) and return its digest."""
    # Hash the file in 32 KiB chunks to avoid loading it fully into memory.
    sha256sum = sha256()
    with open(path, 'rb') as r:
      while True:
        chunk = r.read(32 * 1024)
        if not chunk:
          break
        sha256sum.update(chunk)

    digest = f'sha256:{sha256sum.hexdigest()}'

    try:
      # HEAD probe: only upload when the server does not already have the blob.
      await self._request('HEAD', f'/api/blobs/{digest}')
    except ResponseError as e:
      if e.status_code != 404:
        raise

      async def upload_bytes():
        # Async generator so httpx can stream the body without buffering it.
        with open(path, 'rb') as r:
          while True:
            chunk = r.read(32 * 1024)
            if not chunk:
              break
            yield chunk

      await self._request('POST', f'/api/blobs/{digest}', content=upload_bytes())

    return digest

  async def delete(self, model: str) -> Mapping[str, Any]:
    """Delete `model`; returns a {'status': ...} mapping rather than raising on non-200."""
    response = await self._request('DELETE', '/api/delete', json={'name': model})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  async def list(self) -> Mapping[str, Any]:
    """List locally available models."""
    response = await self._request('GET', '/api/tags')
    return response.json()

  async def copy(self, source: str, destination: str) -> Mapping[str, Any]:
    """Copy model `source` to `destination`; returns a {'status': ...} mapping."""
    response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination})
    return {'status': 'success' if response.status_code == 200 else 'error'}

  async def show(self, model: str) -> Mapping[str, Any]:
    """Return details (modelfile, parameters, template, ...) for `model`."""
    response = await self._request('POST', '/api/show', json={'name': model})
    return response.json()


def _encode_image(image) -> str:
  """
  Normalize an image argument into a base64-encoded string.

  Accepts, in order of precedence:
  - a path (str or PathLike) to an existing file: its bytes are read and base64-encoded;
  - a str/bytes value that already decodes as valid base64: returned unchanged
    (bytes are decoded to str);
  - raw bytes or an `io.BytesIO`: the contents are base64-encoded.

  Raises `RequestError` for any other input.

  >>> _encode_image(b'ollama')
  'b2xsYW1h'
  >>> _encode_image(io.BytesIO(b'ollama'))
  'b2xsYW1h'
  >>> _encode_image('YWJj')
  'YWJj'
  >>> _encode_image(b'YWJj')
  'YWJj'
  """
  if p := _as_path(image):
    return b64encode(p.read_bytes()).decode('utf-8')

  try:
    # Already base64? Validate and pass through unchanged.
    b64decode(image, validate=True)
    return image if isinstance(image, str) else image.decode('utf-8')
  except (binascii.Error, TypeError):
    ...

  if b := _as_bytesio(image):
    return b64encode(b.read()).decode('utf-8')

  raise RequestError('image must be bytes, path-like object, or file-like object')


def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]:
  """Return `s` as a Path if it names an existing file, else None.

  The broad except guards against OS-level errors from exotic inputs
  (e.g. strings too long to be a valid path).
  """
  if isinstance(s, str) or isinstance(s, Path):
    try:
      if (p := Path(s)).exists():
        return p
    except Exception:
      ...
  return None


def _as_bytesio(s: Any) -> Union[io.BytesIO, None]:
  """Return `s` as a BytesIO (wrapping raw bytes if needed), else None."""
  if isinstance(s, io.BytesIO):
    return s
  elif isinstance(s, bytes):
    return io.BytesIO(s)
  return None


def _parse_host(host: Optional[str]) -> str:
  """Normalize a host spec into a full 'scheme://host:port' base URL.

  Defaults: scheme http, host 127.0.0.1, port 11434 — except that an explicit
  http:// or https:// scheme implies port 80 or 443 respectively when no port
  is given.

  >>> _parse_host(None)
  'http://127.0.0.1:11434'
  >>> _parse_host('')
  'http://127.0.0.1:11434'
  >>> _parse_host('1.2.3.4')
  'http://1.2.3.4:11434'
  >>> _parse_host(':56789')
  'http://127.0.0.1:56789'
  >>> _parse_host('1.2.3.4:56789')
  'http://1.2.3.4:56789'
  >>> _parse_host('http://1.2.3.4')
  'http://1.2.3.4:80'
  >>> _parse_host('https://1.2.3.4')
  'https://1.2.3.4:443'
  >>> _parse_host('https://1.2.3.4:56789')
  'https://1.2.3.4:56789'
  >>> _parse_host('example.com')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789')
  'http://example.com:56789'
  >>> _parse_host('http://example.com')
  'http://example.com:80'
  >>> _parse_host('https://example.com')
  'https://example.com:443'
  >>> _parse_host('https://example.com:56789')
  'https://example.com:56789'
  >>> _parse_host('example.com/')
  'http://example.com:11434'
  >>> _parse_host('example.com:56789/')
  'http://example.com:56789'
  """
  host, port = host or '', 11434
  scheme, _, hostport = host.partition('://')
  if not hostport:
    # No scheme present: the whole input is the host[:port] part.
    scheme, hostport = 'http', host
  elif scheme == 'http':
    port = 80
  elif scheme == 'https':
    port = 443

  # urlsplit handles IPv6 brackets, userinfo, and trailing paths for us.
  split = urllib.parse.urlsplit('://'.join([scheme, hostport]))
  host = split.hostname or '127.0.0.1'
  port = split.port or port

  return f'{scheme}://{host}:{port}'
evocodebench_data_220
"""Challenge loading and type-check execution for a Python typing quiz.

Challenges live in folders named `<level>-<name>`, each containing a
`question.py` (user-editable code plus hidden test code separated by a
marker) and an optional `hints.md`. Submissions are checked by running
pyright on the combined code and comparing reported errors against
`# expect-type-error` annotations.
"""

import io
import random
import re
import subprocess
import tempfile
import tokenize
from collections import defaultdict
from dataclasses import dataclass, field
from enum import StrEnum
from pathlib import Path
from typing import ClassVar, Optional, TypeAlias

# Project root: this module sits one directory below it.
ROOT_DIR = Path(__file__).parent.parent


class Level(StrEnum):
    """Challenge difficulty; the string value doubles as the folder-name prefix."""

    BASIC = "basic"
    INTERMEDIATE = "intermediate"
    ADVANCED = "advanced"
    EXTREME = "extreme"

    @classmethod
    def is_valid_level(cls, level: str):
        # Membership test against the enum's value -> member mapping.
        return level in cls._value2member_map_


ChallengeName: TypeAlias = str


@dataclass(frozen=True)
class ChallengeKey:
    """Immutable (level, name) identifier for a challenge; usable as a dict key."""

    level: Level
    name: ChallengeName

    @classmethod
    def from_str(cls, key: str):
        """Create a key object from a string like "basic-foo"."""
        # maxsplit=1 keeps hyphens inside the challenge name intact.
        level, name = key.split("-", maxsplit=1)
        return cls(Level(level), name)


@dataclass
class Challenge:
    """A challenge object.

    :param hints: An optional string of hints, use markdown syntax.
    """

    # Marker separating the user-editable portion from the hidden test code.
    CODE_SPLITTER: ClassVar[str] = "\n## End of your code ##\n"

    name: ChallengeName
    level: Level
    code: str
    # Derived fields, populated by parse_code() from `code`.
    user_code: str = field(default="", init=False)
    test_code: str = field(default="", init=False)
    hints: Optional[str] = None

    def __post_init__(self):
        self.parse_code()

    def parse_code(self):
        """Split `code` into the editable part and the hidden test part."""
        self.user_code, _, self.test_code = self.code.partition(self.CODE_SPLITTER)


@dataclass(frozen=True, slots=True)
class TypeCheckResult:
    """Outcome of one pyright run: human-readable message plus pass/fail flag."""

    message: str
    passed: bool
    debug_info: dict = field(default_factory=dict)  # For debugging purposes


class ChallengeManager:
    """The manager for challenges.

    :param root_dir: The root directory that contains the files of challenges.
    """

    def __init__(self, root_dir: Optional[Path] = None):
        if not root_dir:
            root_dir = ROOT_DIR / "challenges"
        self.challenges: dict[ChallengeKey, Challenge] = self._load_challenges(root_dir)
        self.challenges_groupby_level: dict[Level, list[ChallengeName]]
        self.challenges_groupby_level = self._get_challenges_groupby_level()

    def has_challenge(self, key: ChallengeKey) -> bool:
        """Return True if `key` names a loaded challenge."""
        return key in self.challenges

    def get_challenge(self, key: ChallengeKey) -> Challenge:
        """Return the challenge for `key`; raises KeyError if absent."""
        return self.challenges[key]

    @property
    def challenge_count(self) -> int:
        """The count of challenges."""
        return len(self.challenges)

    def run_challenge(self, key: ChallengeKey, user_code: str) -> TypeCheckResult:
        """Type-check `user_code` against the hidden tests of challenge `key`."""
        challenge = self.get_challenge(key)
        # Make sure user code ends with a new line to avoid issue #63.
        return self._type_check_with_pyright(user_code + "\n", challenge.test_code)

    def get_random_challenge(self) -> dict[str, str]:
        """Pick a random level, then a random challenge within it."""
        level = random.choice(list(self.challenges_groupby_level.keys()))
        name = random.choice(self.challenges_groupby_level[level])
        return {"level": level, "name": name}

    @staticmethod
    def _load_challenges(root_dir: Path) -> dict[ChallengeKey, Challenge]:
        """Scan `root_dir` for challenge folders and build the key -> Challenge map."""
        challenges = {}
        for challenge_folder in root_dir.iterdir():
            question_source = challenge_folder / "question.py"
            if not question_source.exists():
                # Not a challenge folder (e.g. stray files); skip silently.
                continue

            # Try to read the optional hints file
            hints_file = challenge_folder / "hints.md"
            if hints_file.exists():
                hints = hints_file.read_text(encoding="utf-8").strip()
            else:
                hints = None

            key = ChallengeKey.from_str(challenge_folder.name)
            challenges[key] = Challenge(
                name=key.name,
                level=key.level,
                code=question_source.read_text(encoding="utf-8"),
                hints=hints,
            )
        return challenges

    def _get_challenges_groupby_level(self) -> dict[Level, list[ChallengeName]]:
        """Group challenge names by level, sorted by name, ordered easy to hard."""
        groups: defaultdict[str, list[ChallengeName]] = defaultdict(list)
        for challenge in self.challenges.values():
            groups[challenge.level].append(challenge.name)

        # Sort challenge by name alphabetically.
        for challenge_names in groups.values():
            challenge_names.sort()
        # Make sure groups are ordered by level (from easy to hard)
        return {level: groups[level] for level in Level}

    # Comment annotation marking a line where a type error is EXPECTED.
    EXPECT_ERROR_COMMENT = "expect-type-error"

    # Pyright error messages look like:
    # `<filename>:<line_no>:<col_no> - <error|warning|information>: <message>`
    # Here we only capture the error messages and line numbers
    PYRIGHT_MESSAGE_REGEX = r"^(?:.+?):(\d+):[\s\-\d]+(error:.+)$"

    @classmethod
    def _type_check_with_pyright(
        cls, user_code: str, test_code: str
    ) -> TypeCheckResult:
        """Run pyright on user_code + test_code and reconcile errors with expectations.

        A line annotated `# expect-type-error` must produce a pyright error;
        any other pyright error, or a missing expected error, fails the run.
        """
        code = f"{user_code}{test_code}"
        buffer = io.StringIO(code)
        # This produces a stream of TokenInfos, example:
        # TokenInfo(type=4 (NEWLINE), string='\n', start=(4, 3), end=(4, 4), line='"""\n'),
        # TokenInfo(type=62 (NL), string='\n', start=(5, 0), end=(5, 1), line='\n')
        # See https://docs.python.org/3/library/tokenize.html#tokenize.tokenize for more details
        tokens = list(tokenize.generate_tokens(buffer.readline))
        # Find all lines that are followed by a comment # expect-type-error
        expect_error_line_numbers = [
            token.start[0]
            for token in tokens
            if token.type == tokenize.COMMENT
            and token.string[1:].strip() == cls.EXPECT_ERROR_COMMENT
        ]
        # Tracks whether an expected error has been reported by type checker.
        error_line_seen_in_err_msg: dict[int, bool] = {
            lineno: False for lineno in expect_error_line_numbers
        }

        # NOTE(review): NamedTemporaryFile opened here is re-read by pyright
        # while still open — this works on POSIX; confirm behavior on Windows.
        with tempfile.NamedTemporaryFile(suffix=".py") as temp:
            temp.write(code.encode())
            temp.flush()

            # TODO: switch to json output to simplify output parsing.
            # https://microsoft.github.io/pyright/#/command-line?id=json-output
            raw_result = subprocess.run(
                ["pyright", "--pythonversion", "3.12", temp.name],
                capture_output=True,
                text=True,
            )
            stdout, stderr = raw_result.stdout, raw_result.stderr
            if stderr:
                # pyright itself failed (not a type error in the checked code).
                return TypeCheckResult(message=stderr, passed=False)
            error_lines: list[str] = []

            # Substract lineno in merged code by lineno_delta, so that the lineno in
            # error message matches those in the test code editor. Fixed #20.
            lineno_delta = len(user_code.splitlines())
            for line in stdout.splitlines():
                m = re.match(cls.PYRIGHT_MESSAGE_REGEX, line)
                if m is None:
                    # Not an error line (summary text, warnings, etc.); ignore.
                    continue
                line_number, message = int(m.group(1)), m.group(2)
                if line_number in error_line_seen_in_err_msg:
                    # Each reported error should be attached to a specific line,
                    # If it is commented with # expect-type-error, let it pass.
                    error_line_seen_in_err_msg[line_number] = True
                    continue
                # Error could be thrown from user code too, in which case delta shouldn't be applied.
                error_lines.append(
                    f"{line_number if line_number <= lineno_delta else line_number - lineno_delta}:{message}"
                )

            # If there are any lines that are expected to fail but not reported by pyright,
            # they should be considered as errors.
            for line_number, seen in error_line_seen_in_err_msg.items():
                if not seen:
                    error_lines.append(
                        f"{line_number - lineno_delta}: error: Expected type error but instead passed"
                    )
            passed = len(error_lines) == 0
            if passed:
                error_lines.append("\nAll tests passed")
            else:
                error_lines.append(f"\nFound {len(error_lines)} errors")

        return TypeCheckResult(message="\n".join(error_lines), passed=passed)


# Module-level singleton used by the web app.
challenge_manager = ChallengeManager()
evocodebench_data_221
import os import io import json import httpx import binascii import platform import urllib.parse from os import PathLike from pathlib import Path from hashlib import sha256 from base64 import b64encode, b64decode from typing import Any, AnyStr, Union, Optional, Sequence, Mapping, Literal import sys if sys.version_info < (3, 9): from typing import Iterator, AsyncIterator else: from collections.abc import Iterator, AsyncIterator from importlib import metadata try: __version__ = metadata.version('ollama') except metadata.PackageNotFoundError: __version__ = '0.0.0' from ollama._types import Message, Options, RequestError, ResponseError class BaseClient: def __init__( self, client, host: Optional[str] = None, follow_redirects: bool = True, timeout: Any = None, **kwargs, ) -> None: """ Creates a httpx client. Default parameters are the same as those defined in httpx except for the following: - `follow_redirects`: True - `timeout`: None `kwargs` are passed to the httpx client. """ headers = kwargs.pop('headers', {}) headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' headers['User-Agent'] = f'ollama-python/{__version__} ({platform.machine()} {platform.system().lower()}) Python/{platform.python_version()}' self._client = client( base_url=_parse_host(host or os.getenv('OLLAMA_HOST')), follow_redirects=follow_redirects, timeout=timeout, headers=headers, **kwargs, ) class Client(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.Client, host, **kwargs) def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response def _stream(self, method: str, url: str, **kwargs) -> Iterator[Mapping[str, Any]]: with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except 
httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None for line in r.iter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: return self._stream(*args, **kwargs) if stream else self._request(*args, **kwargs).json() def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns a `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. 
Returns `ChatResponse` if `stream` is `False`, otherwise returns a `ChatResponse` generator. """ if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of Message or dict-like objects') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: return self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ).json() def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], Iterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise with open(path, 'rb') as r: self._request('POST', f'/api/blobs/{digest}', content=r) return digest def delete(self, model: str) -> Mapping[str, Any]: response = self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if 
response.status_code == 200 else 'error'} def list(self) -> Mapping[str, Any]: return self._request('GET', '/api/tags').json() def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} def show(self, model: str) -> Mapping[str, Any]: return self._request('POST', '/api/show', json={'name': model}).json() class AsyncClient(BaseClient): def __init__(self, host: Optional[str] = None, **kwargs) -> None: super().__init__(httpx.AsyncClient, host, **kwargs) async def _request(self, method: str, url: str, **kwargs) -> httpx.Response: response = await self._client.request(method, url, **kwargs) try: response.raise_for_status() except httpx.HTTPStatusError as e: raise ResponseError(e.response.text, e.response.status_code) from None return response async def _stream(self, method: str, url: str, **kwargs) -> AsyncIterator[Mapping[str, Any]]: async def inner(): async with self._client.stream(method, url, **kwargs) as r: try: r.raise_for_status() except httpx.HTTPStatusError as e: e.response.read() raise ResponseError(e.response.text, e.response.status_code) from None async for line in r.aiter_lines(): partial = json.loads(line) if e := partial.get('error'): raise ResponseError(e) yield partial return inner() async def _request_stream( self, *args, stream: bool = False, **kwargs, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: if stream: return await self._stream(*args, **kwargs) response = await self._request(*args, **kwargs) return response.json() async def generate( self, model: str = '', prompt: str = '', system: str = '', template: str = '', context: Optional[Sequence[int]] = None, stream: bool = False, raw: bool = False, format: Literal['', 'json'] = '', images: Optional[Sequence[AnyStr]] = None, options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, 
) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `GenerateResponse` if `stream` is `False`, otherwise returns an asynchronous `GenerateResponse` generator. """ if not model: raise RequestError('must provide a model') return await self._request_stream( 'POST', '/api/generate', json={ 'model': model, 'prompt': prompt, 'system': system, 'template': template, 'context': context or [], 'stream': stream, 'raw': raw, 'images': [_encode_image(image) for image in images or []], 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def chat( self, model: str = '', messages: Optional[Sequence[Message]] = None, stream: bool = False, format: Literal['', 'json'] = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Create a chat response using the requested model. Raises `RequestError` if a model is not provided. Raises `ResponseError` if the request could not be fulfilled. Returns `ChatResponse` if `stream` is `False`, otherwise returns an asynchronous `ChatResponse` generator. 
""" if not model: raise RequestError('must provide a model') for message in messages or []: if not isinstance(message, dict): raise TypeError('messages must be a list of strings') if not (role := message.get('role')) or role not in ['system', 'user', 'assistant']: raise RequestError('messages must contain a role and it must be one of "system", "user", or "assistant"') if not message.get('content'): raise RequestError('messages must contain content') if images := message.get('images'): message['images'] = [_encode_image(image) for image in images] return await self._request_stream( 'POST', '/api/chat', json={ 'model': model, 'messages': messages, 'stream': stream, 'format': format, 'options': options or {}, 'keep_alive': keep_alive, }, stream=stream, ) async def embeddings( self, model: str = '', prompt: str = '', options: Optional[Options] = None, keep_alive: Optional[Union[float, str]] = None, ) -> Sequence[float]: response = await self._request( 'POST', '/api/embeddings', json={ 'model': model, 'prompt': prompt, 'options': options or {}, 'keep_alive': keep_alive, }, ) return response.json() async def pull( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ return await self._request_stream( 'POST', '/api/pull', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def push( self, model: str, insecure: bool = False, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. 
""" return await self._request_stream( 'POST', '/api/push', json={ 'name': model, 'insecure': insecure, 'stream': stream, }, stream=stream, ) async def create( self, model: str, path: Optional[Union[str, PathLike]] = None, modelfile: Optional[str] = None, stream: bool = False, ) -> Union[Mapping[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Raises `ResponseError` if the request could not be fulfilled. Returns `ProgressResponse` if `stream` is `False`, otherwise returns a `ProgressResponse` generator. """ if (realpath := _as_path(path)) and realpath.exists(): modelfile = await self._parse_modelfile(realpath.read_text(), base=realpath.parent) elif modelfile: modelfile = await self._parse_modelfile(modelfile) else: raise RequestError('must provide either path or modelfile') return await self._request_stream( 'POST', '/api/create', json={ 'name': model, 'modelfile': modelfile, 'stream': stream, }, stream=stream, ) async def _parse_modelfile(self, modelfile: str, base: Optional[Path] = None) -> str: base = Path.cwd() if base is None else base out = io.StringIO() for line in io.StringIO(modelfile): command, _, args = line.partition(' ') if command.upper() not in ['FROM', 'ADAPTER']: print(line, end='', file=out) continue path = Path(args.strip()).expanduser() path = path if path.is_absolute() else base / path if path.exists(): args = f'@{await self._create_blob(path)}\n' print(command, args, end='', file=out) return out.getvalue() async def _create_blob(self, path: Union[str, Path]) -> str: sha256sum = sha256() with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break sha256sum.update(chunk) digest = f'sha256:{sha256sum.hexdigest()}' try: await self._request('HEAD', f'/api/blobs/{digest}') except ResponseError as e: if e.status_code != 404: raise async def upload_bytes(): with open(path, 'rb') as r: while True: chunk = r.read(32 * 1024) if not chunk: break yield chunk await self._request('POST', f'/api/blobs/{digest}', 
content=upload_bytes()) return digest async def delete(self, model: str) -> Mapping[str, Any]: response = await self._request('DELETE', '/api/delete', json={'name': model}) return {'status': 'success' if response.status_code == 200 else 'error'} async def list(self) -> Mapping[str, Any]: response = await self._request('GET', '/api/tags') return response.json() async def copy(self, source: str, destination: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/copy', json={'source': source, 'destination': destination}) return {'status': 'success' if response.status_code == 200 else 'error'} async def show(self, model: str) -> Mapping[str, Any]: response = await self._request('POST', '/api/show', json={'name': model}) return response.json() def _encode_image(image) -> str: """ >>> _encode_image(b'ollama') 'b2xsYW1h' >>> _encode_image(io.BytesIO(b'ollama')) 'b2xsYW1h' >>> _encode_image('LICENSE') 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIE
VWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image(Path('LICENSE')) 'TUlUIExpY2Vuc2UKCkNvcHlyaWdodCAoYykgT2xsYW1hCgpQZXJtaXNzaW9uIGlzIGhlcmVieSBncmFudGVkLCBmcmVlIG9mIGNoYXJnZSwgdG8gYW55IHBlcnNvbiBvYnRhaW5pbmcgYSBjb3B5Cm9mIHRoaXMgc29mdHdhcmUgYW5kIGFzc29jaWF0ZWQgZG9jdW1lbnRhdGlvbiBmaWxlcyAodGhlICJTb2Z0d2FyZSIpLCB0byBkZWFsCmluIHRoZSBTb2Z0d2FyZSB3aXRob3V0IHJlc3RyaWN0aW9uLCBpbmNsdWRpbmcgd2l0aG91dCBsaW1pdGF0aW9uIHRoZSByaWdodHMKdG8gdXNlLCBjb3B5LCBtb2RpZnksIG1lcmdlLCBwdWJsaXNoLCBkaXN0cmlidXRlLCBzdWJsaWNlbnNlLCBhbmQvb3Igc2VsbApjb3BpZXMgb2YgdGhlIFNvZnR3YXJlLCBhbmQgdG8gcGVybWl0IHBlcnNvbnMgdG8gd2hvbSB0aGUgU29mdHdhcmUgaXMKZnVybmlzaGVkIHRvIGRvIHNvLCBzdWJqZWN0IHRvIHRoZSBmb2xsb3dpbmcgY29uZGl0aW9uczoKClRoZSBhYm92ZSBjb3B5cmlnaHQgbm90aWNlIGFuZCB0aGlzIHBlcm1pc3Npb24gbm90aWNlIHNoYWxsIGJlIGluY2x1ZGVkIGluIGFsbApjb3BpZXMgb3Igc3Vic3RhbnRpYWwgcG9ydGlvbnMgb2YgdGhlIFNvZnR3YXJlLgoKVEhFIFNPRlRXQVJFIElTIFBST1ZJREVEICJBUyBJUyIsIFdJVEhPVVQgV0FSUkFOVFkgT0YgQU5ZIEtJTkQsIEVYUFJFU1MgT1IKSU1QTElFRCwgSU5DTFVESU5HIEJVVCBOT1QgTElNSVRFRCBUTyBUSEUgV0FSUkFOVElFUyBPRiBNRVJDSEFOVEFCSUxJVFksCkZJVE5FU1MgRk9SIEEgUEFSVElDVUxBUiBQVVJQT1NFIEFORCBOT05JTkZSSU5HRU1FTlQuIElOIE5PIEVWRU5UIFNIQUxMIFRIRQpBVVRIT1JTIE9SIENPUFlSSUdIVCBIT0xERVJTIEJFIExJQUJMRSBGT1IgQU5ZIENMQUlNLCBEQU1BR0VTIE9SIE9USEVSCkxJQUJJTElUWSwgV0hFVEhFUiBJTiBBTiBBQ1RJT04gT0YgQ09OVFJBQ1QsIFRPUlQgT1IgT1RIRVJXSVNFLCBBUklTSU5HIEZST00sCk9VVCBPRiBPUiBJTiBDT05ORUNUSU9OIFdJVEggVEhFIFNPRlRXQVJFIE9SIFRIRSBVU0UgT1IgT1RIRVIgREVBTElOR1MgSU4gVEhFClNPRlRXQVJFLgo=' >>> _encode_image('YWJj') 'YWJj' >>> _encode_image(b'YWJj') 'YWJj' """ if p := _as_path(image): return b64encode(p.read_bytes()).decode('utf-8') try: b64decode(image, validate=True) return image if 
isinstance(image, str) else image.decode('utf-8') except (binascii.Error, TypeError): ... if b := _as_bytesio(image): return b64encode(b.read()).decode('utf-8') raise RequestError('image must be bytes, path-like object, or file-like object') def _as_path(s: Optional[Union[str, PathLike]]) -> Union[Path, None]: if isinstance(s, str) or isinstance(s, Path): try: if (p := Path(s)).exists(): return p except Exception: ... return None def _as_bytesio(s: Any) -> Union[io.BytesIO, None]: if isinstance(s, io.BytesIO): return s elif isinstance(s, bytes): return io.BytesIO(s) return None def _parse_host(host: Optional[str]) -> str: """ >>> _parse_host(None) 'http://127.0.0.1:11434' >>> _parse_host('') 'http://127.0.0.1:11434' >>> _parse_host('1.2.3.4') 'http://1.2.3.4:11434' >>> _parse_host(':56789') 'http://127.0.0.1:56789' >>> _parse_host('1.2.3.4:56789') 'http://1.2.3.4:56789' >>> _parse_host('http://1.2.3.4') 'http://1.2.3.4:80' >>> _parse_host('https://1.2.3.4') 'https://1.2.3.4:443' >>> _parse_host('https://1.2.3.4:56789') 'https://1.2.3.4:56789' >>> _parse_host('example.com') 'http://example.com:11434' >>> _parse_host('example.com:56789') 'http://example.com:56789' >>> _parse_host('http://example.com') 'http://example.com:80' >>> _parse_host('https://example.com') 'https://example.com:443' >>> _parse_host('https://example.com:56789') 'https://example.com:56789' >>> _parse_host('example.com/') 'http://example.com:11434' >>> _parse_host('example.com:56789/') 'http://example.com:56789' """ host, port = host or '', 11434 scheme, _, hostport = host.partition('://') if not hostport: scheme, hostport = 'http', host elif scheme == 'http': port = 80 elif scheme == 'https': port = 443 split = urllib.parse.urlsplit('://'.join([scheme, hostport])) host = split.hostname or '127.0.0.1' port = split.port or port return f'{scheme}://{host}:{port}'
evocodebench_data_222
import contextlib
import packaging.version
from functorch.compile import aot_function, aot_module
import torch


@contextlib.contextmanager
def no_fake_tensor():
    """Context manager that disables functorch's fake-tensor mode.

    Only applies on torch >= 2.0 (where ``torch._functorch.config`` exists);
    on older versions it is a no-op.  The previous flag value is always
    restored on exit, even if the body raises.
    """
    on_torch2 = packaging.version.parse(
        torch.__version__) >= packaging.version.parse("2.0.0")
    if not on_torch2:
        yield
        return
    from torch._functorch import config
    saved = config.use_fake_tensor
    config.use_fake_tensor = False
    try:
        yield
    finally:
        config.use_fake_tensor = saved


# The compiler_fn is called after the forward and backward graphs are extracted.
# Here, we just print the code in the compiler_fn. Return of this function is a callable.
def get_compiler_fn(title=None):
    """Return an AOT-autograd "compiler" that prints the graph code unchanged.

    :param title: optional heading printed before the graph code.
    """
    def print_and_return(fx_module: torch.fx.GraphModule, _):
        if title is not None:
            print(title)
        print(fx_module.code)
        return fx_module

    return print_and_return


def aot_printer(fn):
    """Wrap ``fn`` so its extracted forward/backward graphs are printed.

    Dispatches to ``aot_module`` for ``nn.Module`` instances and
    ``aot_function`` for plain callables; behavior of the wrapped object
    is otherwise unchanged.
    """
    forward_printer = get_compiler_fn("Forward Code:")
    backward_printer = get_compiler_fn("Backward Code:")
    wrap = aot_module if isinstance(fn, torch.nn.Module) else aot_function
    return wrap(fn, fw_compiler=forward_printer, bw_compiler=backward_printer)
evocodebench_data_223
import logging
import os
import uuid
from copy import deepcopy
from typing import Optional, Dict, List

import pandas as pd
import uvicorn
import yaml
from fastapi import FastAPI
from pydantic import BaseModel

from autorag.support import get_support_modules
from autorag.utils.util import load_summary_file

logger = logging.getLogger("AutoRAG")


def extract_node_line_names(config_dict: Dict) -> List[str]:
    """
    Extract node line names with the given config dictionary order.

    :param config_dict: The yaml configuration dict for the pipeline.
        You can load this to access trail_folder/config.yaml.
    :return: The list of node line names.
        It is the order of the node line names in the pipeline.
    """
    return [node_line['node_line_name'] for node_line in config_dict['node_lines']]


def extract_node_strategy(config_dict: Dict) -> Dict:
    """
    Extract node strategies with the given config dictionary.
    The return value is a dictionary of node type and its strategy.

    :param config_dict: The yaml configuration dict for the pipeline.
        You can load this to access trail_folder/config.yaml.
    :return: Key is node_type and value is strategy dict.
    """
    # Flatten all nodes across node lines; missing 'strategy' defaults to {}.
    return {node['node_type']: node.get('strategy', {})
            for node_line in config_dict['node_lines']
            for node in node_line['nodes']}


def summary_df_to_yaml(summary_df: pd.DataFrame, config_dict: Dict) -> Dict:
    """
    Convert trial summary dataframe to config yaml file.

    :param summary_df: The trial summary dataframe of the evaluated trial.
    :param config_dict: The yaml configuration dict for the pipeline.
        You can load this to access trail_folder/config.yaml.
    :return: Dictionary of config yaml file.
        You can save this dictionary to yaml file.
    """
    # summary_df columns : 'node_line_name', 'node_type', 'best_module_filename',
    # 'best_module_name', 'best_module_params', 'best_execution_time'
    node_line_names = extract_node_line_names(config_dict)
    node_strategies = extract_node_strategy(config_dict)

    # Attach each node's strategy (from the original config) to the summary rows.
    strategy_df = pd.DataFrame({
        'node_type': list(node_strategies.keys()),
        'strategy': list(node_strategies.values())
    })
    summary_df = summary_df.merge(strategy_df, on='node_type', how='left')

    # Use an ordered Categorical so sorting/grouping preserves the pipeline's
    # declared node-line order rather than alphabetical order.
    summary_df['categorical_node_line_name'] = pd.Categorical(summary_df['node_line_name'],
                                                              categories=node_line_names, ordered=True)
    summary_df = summary_df.sort_values(by='categorical_node_line_name')
    grouped = summary_df.groupby('categorical_node_line_name')

    # One best module per node; its params are inlined next to 'module_type'.
    node_lines = [
        {
            'node_line_name': node_line_name,
            'nodes': [
                {
                    'node_type': row['node_type'],
                    'strategy': row['strategy'],
                    'modules': [{
                        'module_type': row['best_module_name'],
                        **row['best_module_params']
                    }]
                } for _, row in node_line.iterrows()
            ]
        } for node_line_name, node_line in grouped
    ]
    return {'node_lines': node_lines}


def extract_best_config(trial_path: str, output_path: Optional[str] = None) -> Dict:
    """
    Extract the optimal pipeline from evaluated trial.

    :param trial_path: The path to the trial directory that you want to
        extract the pipeline from. Must already be evaluated.
    :param output_path: Output path that pipeline yaml file will be saved.
        Must be .yaml or .yml file. If None, it does not save yaml file
        and just return dict values. Default is None.
    :return: The dictionary of the extracted pipeline.
    """
    summary_path = os.path.join(trial_path, 'summary.csv')
    if not os.path.exists(summary_path):
        raise ValueError(f"summary.csv does not exist in {trial_path}.")
    # 'best_module_params' is stored as a serialized dict in the csv.
    trial_summary_df = load_summary_file(summary_path, dict_columns=['best_module_params'])
    config_yaml_path = os.path.join(trial_path, 'config.yaml')
    with open(config_yaml_path, 'r') as f:
        config_dict = yaml.safe_load(f)
    yaml_dict = summary_df_to_yaml(trial_summary_df, config_dict)
    if output_path is not None:
        with open(output_path, 'w') as f:
            yaml.dump(yaml_dict, f)
    return yaml_dict


class Runner:
    # Executes an extracted (single-module-per-node) pipeline, either directly
    # via run() or as a FastAPI HTTP service via run_api_server().

    def __init__(self, config: Dict, project_dir: Optional[str] = None):
        self.config = config
        self.project_dir = os.getcwd() if project_dir is None else project_dir
        self.app = FastAPI()
        self.__add_api_route()

    @classmethod
    def from_yaml(cls, yaml_path: str, project_dir: Optional[str] = None):
        """
        Load Runner from yaml file.
        Must be extracted yaml file from evaluated trial using extract_best_config method.

        :param yaml_path: The path of the yaml file.
        :param project_dir: The path of the project directory.
            Default is the current directory.
        :return: Initialized Runner.
        """
        with open(yaml_path, 'r') as f:
            try:
                config = yaml.safe_load(f)
            except yaml.YAMLError as exc:
                logger.error(exc)
                raise exc
        return cls(config, project_dir=project_dir)

    @classmethod
    def from_trial_folder(cls, trial_path: str):
        """
        Load Runner from evaluated trial folder.
        Must already be evaluated using Evaluator class.
        It sets the project_dir as the parent directory of the trial folder.

        :param trial_path: The path of the trial folder.
        :return: Initialized Runner.
        """
        config = extract_best_config(trial_path)
        return cls(config, project_dir=os.path.dirname(trial_path))

    def run(self, query: str, result_column: str = "generated_texts"):
        """
        Run the pipeline with query.
        The loaded pipeline must start with a single query,
        so the first module of the pipeline must be `query_expansion` or `retrieval` module.

        :param query: The query of the user.
        :param result_column: The result column name for the answer.
            Default is `generated_texts`, which is the output of the `generation` module.
        :return: The result of the pipeline.
        """
        # Deep-copy so module.pop('module_type') below does not mutate self.config.
        node_lines = deepcopy(self.config['node_lines'])
        previous_result = pd.DataFrame({
            'qid': str(uuid.uuid4()),
            'query': [query],
            'retrieval_gt': [[]],
            'generation_gt': [''],
        })  # pseudo qa data for execution
        for node_line in node_lines:
            for node in node_line['nodes']:
                if len(node['modules']) != 1:
                    # NOTE(review): adjacent string literals here concatenate
                    # without a space ("runner.Please") — confirm and fix upstream.
                    raise ValueError("The number of modules in a node must be 1 for using runner."
                                     "Please use extract_best_config method for extracting yaml file from evaluated trial.")
                module = node['modules'][0]
                module_type = module.pop('module_type')
                module_params = module
                new_result = get_support_modules(module_type)(
                    project_dir=self.project_dir,
                    previous_result=previous_result,
                    **module_params
                )
                # Columns produced by the new module override the previous ones.
                duplicated_columns = previous_result.columns.intersection(new_result.columns)
                drop_previous_result = previous_result.drop(columns=duplicated_columns)
                previous_result = pd.concat([drop_previous_result, new_result], axis=1)
        # Single pseudo-QA row -> single answer.
        return previous_result[result_column].tolist()[0]

    def __add_api_route(self):

        @self.app.post("/run")
        async def run_pipeline(runner_input: RunnerInput):
            query = runner_input.query
            result_column = runner_input.result_column
            result = self.run(query, result_column)
            return {result_column: result}

    def run_api_server(self, host: str = '0.0.0.0', port: int = 8000, **kwargs):
        """
        Run the pipeline as api server.
        You can send POST request to `http://host:port/run` with json body like below:

        .. Code:: json

            {
                "Query": "your query",
                "result_column": "answer"
            }

        And it returns json response like below:

        .. Code:: json

            {
                "answer": "your answer"
            }

        :param host: The host of the api server.
        :param port: The port of the api server.
        :param kwargs: Other arguments for uvicorn.run.
        """
        logger.info(f"Run api server at {host}:{port}")
        uvicorn.run(self.app, host=host, port=port, **kwargs)


class RunnerInput(BaseModel):
    # Request schema for POST /run.
    query: str
    result_column: str = "answer"
evocodebench_data_224
import logging
import inspect
import functools
import threading

import torch

from sfast.utils import flat_tensors
from sfast.utils.copy import tree_copy
from sfast.hooks.module_jit_hook import (apply_to_all_modules,
                                         apply_to_module)
from .utils import better_trace

# NOTE(review): root logger, not getLogger(__name__) — presumably intentional; confirm.
logger = logging.getLogger()


def trace_with_kwargs(func, example_inputs=None, example_kwarg_inputs=None, **kwargs):
    """Trace ``func`` through a positional-args-only wrapper.

    Flattens (args, kwargs) into a flat tensor tuple so torch.jit tracing can
    handle keyword arguments, then traces the wrapped callable.

    Returns a tuple ``(traced_module, call_helper)`` where ``call_helper``
    wraps a traced module back into a callable taking the original
    ``(*args, **kwargs)`` calling convention.
    """
    if example_inputs is None:
        example_inputs = tuple()
    if example_kwarg_inputs is None:
        example_kwarg_inputs = {}
    # Detached deep copies prevent the trace from aliasing caller tensors.
    pos_args = flat_tensors.flattern(
        (tree_copy(example_inputs, detach=True),
         tree_copy(example_kwarg_inputs, detach=True)))
    traced_module = better_trace(TraceablePosArgOnlyModuleWrapper(func), pos_args,
                                 **kwargs)
    training = getattr(func, 'training', False) if isinstance(
        func, torch.nn.Module) else None
    return traced_module, lambda m: TracedPosArgOnlyModuleWrapper(
        m, training=training)


def lazy_trace(func, *, ts_compiler=None, **kwargs_):
    """Wrap ``func`` so it is traced on first call and cached per input shape.

    The cache key combines training mode and a structural hash of the call
    arguments; a lock with a double-checked lookup prevents duplicate traces
    under concurrency.

    :param ts_compiler: optional post-trace compiler; if its signature accepts
        ``call_helper`` it receives (traced, call_helper, args, kwargs),
        otherwise (traced, converted_args).
    """
    lock = threading.Lock()
    traced_modules = {}

    name = getattr(func, '__name__', func.__class__.__name__)

    wrapped = func.forward if isinstance(func, torch.nn.Module) else func
    module_to_be_traced = to_module(wrapped)

    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        nonlocal lock, traced_modules
        key = (module_to_be_traced.training, hash_arg(args), hash_arg(kwargs))
        traced_module = traced_modules.get(key)
        if traced_module is None:
            with lock:
                # Double-checked: another thread may have traced while we waited.
                traced_module = traced_modules.get(key)
                if traced_module is None:
                    logger.info(f'Tracing {name}')
                    traced_m, call_helper = trace_with_kwargs(
                        module_to_be_traced, args, kwargs, **kwargs_)
                    if ts_compiler is not None:
                        if 'call_helper' in inspect.signature(
                                ts_compiler).parameters:
                            traced_m = ts_compiler(traced_m, call_helper,
                                                   args, kwargs)
                        else:
                            converted_args = call_helper(
                                traced_m).convert_inputs(args, kwargs)
                            traced_m = ts_compiler(traced_m, converted_args)
                    traced_module = call_helper(traced_m)
                    traced_modules[key] = traced_module
        return traced_module(*args, **kwargs)

    # Preserve bound-method semantics for downstream introspection.
    if isinstance(func, torch.nn.Module):
        wrapper.__self__ = func
    elif hasattr(func, '__self__'):
        wrapper.__self__ = func.__self__
    # Expose the trace cache so callers can inspect/clear it.
    wrapper._cached = traced_modules
    return wrapper


def to_module(func, self=None):
    """Return ``func`` as a ``torch.nn.Module`` (identity for modules).

    Plain callables are wrapped in a FuncModule whose ``training`` flag
    mirrors the owning module of a bound method, when one exists.
    """
    if isinstance(func, torch.nn.Module):
        return func

    class FuncModule(torch.nn.Module):

        def __init__(self, func, module=None):
            super().__init__()
            self.func = func
            self.module = module
            self.__name__ = func.__name__

        @functools.wraps(func)
        def forward(self, *args, **kwargs):
            return self.func(*args, **kwargs)

        @property
        def training(self):
            # Mirror the owning module's mode; False when unbound.
            return getattr(self.module, 'training', False)

        # set training status of the module
        @training.setter
        def training(self, mode):
            if hasattr(self, 'module') and hasattr(self.module, 'training'):
                self.module.training = mode

    if self is None and hasattr(func, '__self__') and isinstance(
            func.__self__, torch.nn.Module):
        self = func.__self__
    if self is not None:
        return FuncModule(func, self)
    return FuncModule(func)


def hash_arg(arg):
    """Structural, hashable key for an argument tree.

    Containers are converted recursively; unknown objects hash by *type*
    only, so two distinct instances of the same class collide by design
    (shape/type-level caching, not value-level).
    """
    # micro optimization: bool obj is an instance of int
    if isinstance(arg, (str, int, float, bytes)):
        return arg
    if isinstance(arg, (tuple, list)):
        return tuple(map(hash_arg, arg))
    if isinstance(arg, dict):
        return tuple(
            sorted(((hash_arg(k), hash_arg(v)) for k, v in arg.items()),
                   key=lambda x: x[0]))
    return type(arg)


class TracedPosArgOnlyModuleWrapper(torch.nn.Module):
    """Adapts a traced flat-args module back to (*args, **kwargs) calls."""

    def __init__(self, module, *, training=None):
        super().__init__()
        self.module = module
        if training is None:
            training = getattr(module, 'training', False) if isinstance(
                module, torch.nn.Module) else False
        self.train(training)

    def forward(self, *args, **kwargs):
        outputs = self.module(*self.convert_inputs(args, kwargs))
        unflat_outputs = flat_tensors.unflattern(outputs)
        return unflat_outputs

    def convert_inputs(self, args=(), kwargs=None):
        # Flatten (args, kwargs) into the positional tuple the trace expects.
        if kwargs is None:
            kwargs = {}
        return flat_tensors.flattern((args, kwargs))


class TraceablePosArgOnlyModuleWrapper(torch.nn.Module):
    """Inverse adapter: exposes a (*args, **kwargs) module as flat-args-only
    so torch.jit can trace it."""

    def __init__(self, module):
        super().__init__()
        self.module = module
        training = getattr(module, 'training', False) if isinstance(
            module, torch.nn.Module) else False
        self.train(training)

    def forward(self, *args):
        orig_args, orig_kwargs = flat_tensors.unflattern(args)
        outputs = self.module(*orig_args, **orig_kwargs)
        flat_outputs = flat_tensors.flattern(outputs)
        return flat_outputs


def can_io_obj_be_perfectly_traced(obj):
    # True when the object tree survives flatten/unflatten losslessly.
    return flat_tensors.can_be_perfectly_flattened(obj)


class AutoTraceCompiler:
    """Hook object for module_jit_hook: decides cache keys and compiles a
    module's forward into a traced, functionalized callable."""

    def __init__(self, *, ts_compiler=None, **kwargs):
        self.ts_compiler = ts_compiler
        self.kwargs = kwargs
        # Thread-local re-entrancy flag so hooks can skip nested compiles.
        self._is_compiling = threading.local()

    def is_compiling(self):
        return getattr(self._is_compiling, 'value', False)

    def get_inputs_key(self, func, inputs, kwargs):
        # None disables caching for inputs that cannot be flattened losslessly.
        if not can_io_obj_be_perfectly_traced((inputs, kwargs)):
            return None
        return (hash_arg(inputs), hash_arg(kwargs))

    def get_outputs_key(self, func, outputs):
        if not can_io_obj_be_perfectly_traced(outputs):
            return None
        return (hash_arg(outputs), )

    def compile(self, func, inputs, kwargs):
        """Trace ``func`` with the example (inputs, kwargs) and return a
        plain callable with the original calling convention."""
        self._is_compiling.value = True
        try:
            wrapped = func.forward if isinstance(func,
                                                 torch.nn.Module) else func
            module_to_be_traced = to_module(wrapped)
            traced_m, call_helper = trace_with_kwargs(module_to_be_traced,
                                                      inputs, kwargs,
                                                      **self.kwargs)
            if self.ts_compiler is not None:
                if 'call_helper' in inspect.signature(
                        self.ts_compiler).parameters:
                    traced_m = self.ts_compiler(traced_m, call_helper, inputs,
                                                kwargs)
                else:
                    converted_args = call_helper(traced_m).convert_inputs(
                        inputs, kwargs)
                    traced_m = self.ts_compiler(traced_m, converted_args)
            traced_module = call_helper(traced_m)

            @functools.wraps(wrapped)
            def functionalized(*args, **kwargs):
                return traced_module(*args, **kwargs)

            if isinstance(func, torch.nn.Module):
                functionalized.__self__ = func
            elif hasattr(func, '__self__'):
                functionalized.__self__ = func.__self__
            return functionalized
        finally:
            self._is_compiling.value = False


def apply_auto_trace_compiler(m, filter_func=None, recursive=True, **kwargs):
    """Attach an AutoTraceCompiler hook to ``m`` (and submodules if recursive)."""
    if recursive:
        return apply_to_all_modules(m,
                                    AutoTraceCompiler(**kwargs),
                                    filter_func=filter_func)
    else:
        return apply_to_module(m, AutoTraceCompiler(**kwargs))
evocodebench_data_225
import logging
import os
import uuid
from copy import deepcopy
from typing import Optional, Dict, List

import pandas as pd
import uvicorn
import yaml
from fastapi import FastAPI
from pydantic import BaseModel

from autorag.support import get_support_modules
from autorag.utils.util import load_summary_file

logger = logging.getLogger("AutoRAG")


def extract_node_line_names(config_dict: Dict) -> List[str]:
    """
    Extract node line names with the given config dictionary order.

    :param config_dict: The yaml configuration dict for the pipeline.
        You can load this to access trail_folder/config.yaml.
    :return: The list of node line names.
        It is the order of the node line names in the pipeline.
    """
    return [node_line['node_line_name'] for node_line in config_dict['node_lines']]


def extract_node_strategy(config_dict: Dict) -> Dict:
    """
    Extract node strategies with the given config dictionary.
    The return value is a dictionary of node type and its strategy.

    :param config_dict: The yaml configuration dict for the pipeline.
        You can load this to access trail_folder/config.yaml.
    :return: Key is node_type and value is strategy dict.
    """
    # Flattens all node lines; if a node_type repeats, the last occurrence wins.
    return {node['node_type']: node.get('strategy', {})
            for node_line in config_dict['node_lines']
            for node in node_line['nodes']}


def summary_df_to_yaml(summary_df: pd.DataFrame, config_dict: Dict) -> Dict:
    """
    Convert trial summary dataframe to config yaml file.

    :param summary_df: The trial summary dataframe of the evaluated trial.
    :param config_dict: The yaml configuration dict for the pipeline.
        You can load this to access trail_folder/config.yaml.
    :return: Dictionary of config yaml file.
        You can save this dictionary to yaml file.
    """
    # summary_df columns : 'node_line_name', 'node_type', 'best_module_filename',
    # 'best_module_name', 'best_module_params', 'best_execution_time'
    node_line_names = extract_node_line_names(config_dict)
    node_strategies = extract_node_strategy(config_dict)

    strategy_df = pd.DataFrame({
        'node_type': list(node_strategies.keys()),
        'strategy': list(node_strategies.values())
    })
    summary_df = summary_df.merge(strategy_df, on='node_type', how='left')

    # Ordered categorical preserves the pipeline's node-line order through sort/groupby.
    summary_df['categorical_node_line_name'] = pd.Categorical(summary_df['node_line_name'],
                                                              categories=node_line_names, ordered=True)
    summary_df = summary_df.sort_values(by='categorical_node_line_name')
    grouped = summary_df.groupby('categorical_node_line_name')

    node_lines = [
        {
            'node_line_name': node_line_name,
            'nodes': [
                {
                    'node_type': row['node_type'],
                    'strategy': row['strategy'],
                    'modules': [{
                        'module_type': row['best_module_name'],
                        **row['best_module_params']
                    }]
                } for _, row in node_line.iterrows()
            ]
        } for node_line_name, node_line in grouped
    ]
    return {'node_lines': node_lines}


def extract_best_config(trial_path: str, output_path: Optional[str] = None) -> Dict:
    """
    Extract the optimal pipeline from evaluated trial.

    :param trial_path: The path to the trial directory that you want to extract the pipeline from.
        Must already be evaluated.
    :param output_path: Output path that pipeline yaml file will be saved.
        Must be .yaml or .yml file.
        If None, it does not save yaml file and just return dict values. Default is None.
    :return: The dictionary of the extracted pipeline.
    """
    summary_path = os.path.join(trial_path, 'summary.csv')
    if not os.path.exists(summary_path):
        raise ValueError(f"summary.csv does not exist in {trial_path}.")
    trial_summary_df = load_summary_file(summary_path, dict_columns=['best_module_params'])
    config_yaml_path = os.path.join(trial_path, 'config.yaml')
    with open(config_yaml_path, 'r') as f:
        config_dict = yaml.safe_load(f)
    yaml_dict = summary_df_to_yaml(trial_summary_df, config_dict)
    if output_path is not None:
        with open(output_path, 'w') as f:
            yaml.dump(yaml_dict, f)
    return yaml_dict


class Runner:
    """Executes an extracted (single-module-per-node) pipeline, optionally as an API server."""

    def __init__(self, config: Dict, project_dir: Optional[str] = None):
        self.config = config
        self.project_dir = os.getcwd() if project_dir is None else project_dir
        self.app = FastAPI()
        self.__add_api_route()

    @classmethod
    def from_yaml(cls, yaml_path: str, project_dir: Optional[str] = None):
        """
        Load Runner from yaml file.
        Must be extracted yaml file from evaluated trial using extract_best_config method.

        :param yaml_path: The path of the yaml file.
        :param project_dir: The path of the project directory.
            Default is the current directory.
        :return: Initialized Runner.
        """
        with open(yaml_path, 'r') as f:
            try:
                config = yaml.safe_load(f)
            except yaml.YAMLError as exc:
                logger.error(exc)
                raise exc
        return cls(config, project_dir=project_dir)

    @classmethod
    def from_trial_folder(cls, trial_path: str):
        """
        Load Runner from evaluated trial folder.
        Must already be evaluated using Evaluator class.
        It sets the project_dir as the parent directory of the trial folder.

        :param trial_path: The path of the trial folder.
        :return: Initialized Runner.
        """
        config = extract_best_config(trial_path)
        return cls(config, project_dir=os.path.dirname(trial_path))

    def run(self, query: str, result_column: str = "generated_texts"):
        """
        Run the pipeline with query.
        The loaded pipeline must start with a single query,
        so the first module of the pipeline must be `query_expansion` or `retrieval` module.

        :param query: The query of the user.
        :param result_column: The result column name for the answer.
            Default is `generated_texts`, which is the output of the `generation` module.
        :return: The result of the pipeline.
        """
        node_lines = deepcopy(self.config['node_lines'])
        # Single pseudo qa row so each node receives the schema it expects.
        previous_result = pd.DataFrame({
            'qid': str(uuid.uuid4()),
            'query': [query],
            'retrieval_gt': [[]],
            'generation_gt': [''],
        })  # pseudo qa data for execution
        for node_line in node_lines:
            for node in node_line['nodes']:
                # Only fully-resolved pipelines (one module per node) are runnable.
                if len(node['modules']) != 1:
                    raise ValueError("The number of modules in a node must be 1 for using runner."
                                     "Please use extract_best_config method for extracting yaml file from evaluated trial.")
                module = node['modules'][0]
                module_type = module.pop('module_type')
                module_params = module
                new_result = get_support_modules(module_type)(
                    project_dir=self.project_dir,
                    previous_result=previous_result,
                    **module_params
                )
                # Newer output wins: drop overlapping columns before concatenating.
                duplicated_columns = previous_result.columns.intersection(new_result.columns)
                drop_previous_result = previous_result.drop(columns=duplicated_columns)
                previous_result = pd.concat([drop_previous_result, new_result], axis=1)
        return previous_result[result_column].tolist()[0]

    def __add_api_route(self):
        # Register the POST /run endpoint on this Runner's FastAPI app.
        @self.app.post("/run")
        async def run_pipeline(runner_input: RunnerInput):
            query = runner_input.query
            result_column = runner_input.result_column
            result = self.run(query, result_column)
            return {result_column: result}

    def run_api_server(self, host: str = '0.0.0.0', port: int = 8000, **kwargs):
        """
        Run the pipeline as api server.
        You can send POST request to `http://host:port/run` with json body like below:

        .. Code:: json

            {
                "query": "your query",
                "result_column": "answer"
            }

        And it returns json response like below:

        .. Code:: json

            {
                "answer": "your answer"
            }

        :param host: The host of the api server.
        :param port: The port of the api server.
        :param kwargs: Other arguments for uvicorn.run.
        """
        logger.info(f"Run api server at {host}:{port}")
        # Blocking: serves until the process is interrupted.
        uvicorn.run(self.app, host=host, port=port, **kwargs)


class RunnerInput(BaseModel):
    # Request body schema for POST /run.
    query: str
    result_column: str = "answer"
evocodebench_data_226
import logging
import os
import pathlib
from typing import List, Callable, Dict, Tuple

import pandas as pd

from autorag.evaluate import evaluate_retrieval
from autorag.strategy import measure_speed, filter_by_threshold, select_best_average
from autorag.utils.util import load_summary_file

logger = logging.getLogger("AutoRAG")


def run_retrieval_node(modules: List[Callable],
                       module_params: List[Dict],
                       previous_result: pd.DataFrame,
                       node_line_dir: str,
                       strategies: Dict,
                       ) -> pd.DataFrame:
    """
    Run evaluation and select the best module among retrieval node results.

    :param modules: Retrieval modules to run.
    :param module_params: Retrieval module parameters.
    :param previous_result: Previous result dataframe.
        Could be query expansion's best result or qa data.
    :param node_line_dir: This node line's directory.
    :param strategies: Strategies for retrieval node.
    :return: The best result dataframe.
        It contains previous result columns and retrieval node's result columns.
    """
    if not os.path.exists(node_line_dir):
        os.makedirs(node_line_dir)
    project_dir = pathlib.PurePath(node_line_dir).parent.parent
    retrieval_gt = pd.read_parquet(os.path.join(project_dir, "data", "qa.parquet"))['retrieval_gt'].tolist()

    save_dir = os.path.join(node_line_dir, "retrieval")  # node name
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    def run_and_save(input_modules, input_module_params, filename_start: int):
        # Runs each (module, params) pair, evaluates it, persists per-module
        # parquet files numbered from filename_start, and writes summary.csv.
        # Returns (evaluated results, avg per-row times, summary dataframe).
        result, execution_times = zip(*map(lambda task: measure_speed(
            task[0], project_dir=project_dir, previous_result=previous_result, **task[1]),
                                           zip(input_modules, input_module_params)))
        average_times = list(map(lambda x: x / len(result[0]), execution_times))

        # run metrics before filtering
        if strategies.get('metrics') is None:
            raise ValueError("You must at least one metrics for retrieval evaluation.")
        result = list(map(lambda x: evaluate_retrieval_node(x, retrieval_gt,
                                                            strategies.get('metrics')), result))

        # save results to folder
        filepaths = list(map(lambda x: os.path.join(save_dir, f'{x}.parquet'),
                             range(filename_start, filename_start + len(input_modules))))
        list(map(lambda x: x[0].to_parquet(x[1], index=False), zip(result, filepaths)))  # execute save to parquet
        filename_list = list(map(lambda x: os.path.basename(x), filepaths))

        summary_df = pd.DataFrame({
            'filename': filename_list,
            'module_name': list(map(lambda module: module.__name__, input_modules)),
            'module_params': input_module_params,
            'execution_time': average_times,
            **{metric: list(map(lambda result: result[metric].mean(), result)) for metric in
               strategies.get('metrics')},
        })
        summary_df.to_csv(os.path.join(save_dir, 'summary.csv'), index=False)
        return result, average_times, summary_df

    # run retrieval modules except hybrid.
    # Non-hybrid modules must run first: hybrid modules fuse their saved results.
    hybrid_module_names = ['hybrid_rrf', 'hybrid_cc']
    filename_first = 0
    if any([module.__name__ not in hybrid_module_names for module in modules]):
        non_hybrid_modules, non_hybrid_module_params = zip(*filter(lambda x: x[0].__name__ not in hybrid_module_names,
                                                                   zip(modules, module_params)))
        non_hybrid_results, non_hybrid_times, non_hybrid_summary_df = run_and_save(non_hybrid_modules,
                                                                                   non_hybrid_module_params,
                                                                                   filename_first)
        filename_first += len(non_hybrid_modules)
    else:
        non_hybrid_results, non_hybrid_times, non_hybrid_summary_df = [], [], pd.DataFrame()

    if any([module.__name__ in hybrid_module_names for module in modules]):
        hybrid_modules, hybrid_module_params = zip(*filter(lambda x: x[0].__name__ in hybrid_module_names,
                                                           zip(modules, module_params)))
        if all(['target_module_params' in x for x in hybrid_module_params]):
            # If target_module_params are already given, run hybrid retrieval directly
            hybrid_results, hybrid_times, hybrid_summary_df = run_and_save(hybrid_modules, hybrid_module_params,
                                                                           filename_first)
            filename_first += len(hybrid_modules)
        else:
            # Otherwise, resolve targets from already-saved results: pick the best
            # run per target module, then feed its ids/scores into the hybrid.
            target_modules = list(map(lambda x: x.pop('target_modules'), hybrid_module_params))
            target_filenames = list(map(lambda x: select_result_for_hybrid(save_dir, x), target_modules))
            ids_scores = list(map(lambda x: get_ids_and_scores(save_dir, x), target_filenames))
            target_module_params = list(map(lambda x: get_module_params(save_dir, x), target_filenames))
            hybrid_module_params = list(map(lambda x: {**x[0], **x[1]}, zip(hybrid_module_params, ids_scores)))
            # Hybrid's true cost includes its targets' runtimes, measured earlier.
            real_hybrid_times = list(map(lambda filename: get_hybrid_execution_times(save_dir, filename),
                                         target_filenames))
            hybrid_results, hybrid_times, hybrid_summary_df = run_and_save(hybrid_modules, hybrid_module_params,
                                                                           filename_first)
            filename_first += len(hybrid_modules)
            hybrid_times = real_hybrid_times.copy()
            hybrid_summary_df['execution_time'] = hybrid_times
            hybrid_summary_df = edit_summary_df_params(hybrid_summary_df, target_modules, target_module_params)
    else:
        hybrid_results, hybrid_times, hybrid_summary_df = [], [], pd.DataFrame()

    summary = pd.concat([non_hybrid_summary_df, hybrid_summary_df], ignore_index=True)
    results = non_hybrid_results + hybrid_results
    average_times = non_hybrid_times + hybrid_times
    filenames = summary['filename'].tolist()

    # filter by strategies
    if strategies.get('speed_threshold') is not None:
        results, filenames = filter_by_threshold(results, average_times, strategies['speed_threshold'], filenames)
    selected_result, selected_filename = select_best_average(results, strategies.get('metrics'), filenames)
    best_result = pd.concat([previous_result, selected_result], axis=1)

    # add summary.csv 'is_best' column
    summary['is_best'] = summary['filename'] == selected_filename

    # save the result files
    best_result.to_parquet(os.path.join(save_dir, f'best_{os.path.splitext(selected_filename)[0]}.parquet'),
                           index=False)
    summary.to_csv(os.path.join(save_dir, 'summary.csv'), index=False)
    return best_result


def evaluate_retrieval_node(result_df: pd.DataFrame, retrieval_gt, metrics) -> pd.DataFrame:
    """
    Evaluate retrieval node from retrieval node result dataframe.

    :param result_df: The result dataframe from a retrieval node.
    :param retrieval_gt: Ground truth for retrieval from qa dataset.
    :param metrics: Metric list from input strategies.
    :return: Return result_df with metrics columns.
        The columns will be 'retrieved_contents', 'retrieved_ids', 'retrieve_scores', and metric names.
    """

    @evaluate_retrieval(retrieval_gt=retrieval_gt, metrics=metrics)
    def evaluate_this_module(df: pd.DataFrame):
        return df['retrieved_contents'].tolist(), df['retrieved_ids'].tolist(), df['retrieve_scores'].tolist()

    return evaluate_this_module(result_df)


def select_result_for_hybrid(node_dir: str, target_modules: Tuple) -> List[str]:
    """
    Get ids and scores of target_module from summary.csv and each result parquet file.

    :param node_dir: The directory of the node.
    :param target_modules: The name of the target modules.
    :return: A list of filenames.
    """

    def select_best_among_module(df: pd.DataFrame, module_name: str):
        # Pick the summary row of module_name with the best average metric score.
        modules_summary = df.loc[lambda row: row['module_name'] == module_name]
        if len(modules_summary) == 1:
            return modules_summary.iloc[0, :]
        elif len(modules_summary) <= 0:
            raise ValueError(f"module_name {module_name} does not exist in summary.csv. "
                             f"You must run {module_name} before running hybrid retrieval.")
        metrics = modules_summary.drop(columns=['filename', 'module_name', 'module_params', 'execution_time'])
        metric_average = metrics.mean(axis=1)
        metric_average = metric_average.reset_index(drop=True)
        max_idx = metric_average.idxmax()
        best_module = modules_summary.iloc[max_idx, :]
        return best_module

    summary_df = load_summary_file(os.path.join(node_dir, "summary.csv"))
    best_results = list(map(lambda module_name: select_best_among_module(summary_df, module_name), target_modules))
    best_filenames = list(map(lambda df: df['filename'], best_results))
    return best_filenames


def get_module_params(node_dir: str, filenames: List[str]) -> Tuple[Dict]:
    # Look up the module_params recorded in summary.csv for the given result files.
    summary_df = load_summary_file(os.path.join(node_dir, "summary.csv"))
    best_results = summary_df[summary_df['filename'].isin(filenames)]
    module_params = best_results['module_params'].tolist()
    return tuple(module_params)


def edit_summary_df_params(summary_df: pd.DataFrame, target_modules, target_module_params) -> pd.DataFrame:
    # Replace the bulky injected ids/scores in hybrid rows' module_params with
    # the reproducible target_modules / target_module_params description.
    def delete_ids_scores(x):
        del x['ids']
        del x['scores']
        return x

    summary_df['module_params'] = summary_df['module_params'].apply(delete_ids_scores)
    summary_df['new_params'] = [{'target_modules': x, 'target_module_params': y} for x, y in
                                zip(target_modules, target_module_params)]
    summary_df['module_params'] = summary_df.apply(lambda row: {**row['module_params'], **row['new_params']}, axis=1)
    summary_df = summary_df.drop(columns=['new_params'])
    return summary_df


def get_ids_and_scores(node_dir: str, filenames: List[str]) -> Dict:
    # Load retrieved ids and scores from each saved result parquet file.
    best_results_df = list(map(lambda filename: pd.read_parquet(os.path.join(node_dir, filename)), filenames))
    ids = tuple(map(lambda df: df['retrieved_ids'].apply(list).tolist(), best_results_df))
    scores = tuple(map(lambda df: df['retrieve_scores'].apply(list).tolist(), best_results_df))
    return {
        'ids': ids,
        'scores': scores,
    }


def get_hybrid_execution_times(node_dir: str, filenames: List[str]) -> float:
    # Sum the execution times of the hybrid's target runs (its real cost).
    summary_df = load_summary_file(os.path.join(node_dir, "summary.csv"))
    best_results = summary_df[summary_df['filename'].isin(filenames)]
    execution_times = best_results['execution_time'].sum()
    return execution_times
evocodebench_data_227
import logging
import os
import pathlib
from typing import List, Callable, Dict, Optional
from copy import deepcopy

import pandas as pd

from autorag.nodes.retrieval.run import evaluate_retrieval_node
from autorag.strategy import measure_speed, filter_by_threshold, select_best_average
from autorag.utils.util import make_combinations, explode
from autorag.support import get_support_modules

logger = logging.getLogger("AutoRAG")


def run_query_expansion_node(modules: List[Callable],
                             module_params: List[Dict],
                             previous_result: pd.DataFrame,
                             node_line_dir: str,
                             strategies: Dict,
                             ) -> pd.DataFrame:
    """
    Run evaluation and select the best module among query expansion node results.
    Initially, retrieval is run using expanded_queries, the result of the query_expansion module.
    The retrieval module is run as a combination of the retrieval_modules in strategies.
    If there are multiple retrieval_modules, run them all and choose the best result.
    If there are no retrieval_modules, run them with the default of bm25.
    In this way, the best result is selected for each module, and then the best result is selected.

    :param modules: Query expansion modules to run.
    :param module_params: Query expansion module parameters.
    :param previous_result: Previous result dataframe.
        In this case, it would be qa data.
    :param node_line_dir: This node line's directory.
    :param strategies: Strategies for query expansion node.
    :return: The best result dataframe.
    """
    if not os.path.exists(node_line_dir):
        os.makedirs(node_line_dir)
    node_dir = os.path.join(node_line_dir, "query_expansion")
    if not os.path.exists(node_dir):
        os.makedirs(node_dir)
    project_dir = pathlib.PurePath(node_line_dir).parent.parent

    # run query expansion
    results, execution_times = zip(*map(lambda task: measure_speed(
        task[0], project_dir=project_dir, previous_result=previous_result, **task[1]),
                                        zip(modules, module_params)))
    average_times = list(map(lambda x: x / len(results[0]), execution_times))

    # save results to folder
    # Replace long prompt strings with their index so saved params stay compact.
    # BUGFIX: the membership test previously checked the *list* of param dicts
    # ('prompt' in module_params), which is always False; check each dict instead.
    pseudo_module_params = deepcopy(module_params)
    for i, module_param in enumerate(pseudo_module_params):
        if 'prompt' in module_param:
            module_param['prompt'] = str(i)
    # NOTE(review): pseudo_module_params is not referenced after this loop —
    # presumably intended for the summary file below; confirm against caller.
    filepaths = list(map(lambda x: os.path.join(node_dir, f'{x}.parquet'), range(len(modules))))
    list(map(lambda x: x[0].to_parquet(x[1], index=False), zip(results, filepaths)))  # execute save to parquet
    filenames = list(map(lambda x: os.path.basename(x), filepaths))

    # make summary file
    summary_df = pd.DataFrame({
        'filename': filenames,
        'module_name': list(map(lambda module: module.__name__, modules)),
        'module_params': module_params,
        'execution_time': average_times,
    })

    # Run evaluation when there are more than one module.
    if len(modules) > 1:
        # pop general keys from strategies (e.g. metrics, speed_threshold)
        general_key = ['metrics', 'speed_threshold']
        general_strategy = dict(filter(lambda x: x[0] in general_key, strategies.items()))
        extra_strategy = dict(filter(lambda x: x[0] not in general_key, strategies.items()))

        # first, filter by threshold if it is enabled.
        if general_strategy.get('speed_threshold') is not None:
            results, filenames = filter_by_threshold(results, average_times, general_strategy['speed_threshold'],
                                                     filenames)

        # check metrics in strategy
        if general_strategy.get('metrics') is None:
            raise ValueError("You must at least one metrics for query expansion evaluation.")

        if extra_strategy.get('top_k') is None:
            extra_strategy['top_k'] = 10  # default value

        # get retrieval modules from strategy
        retrieval_callables, retrieval_params = make_retrieval_callable_params(extra_strategy)

        # get retrieval_gt
        retrieval_gt = pd.read_parquet(os.path.join(project_dir, "data", "qa.parquet"))['retrieval_gt'].tolist()

        # run evaluation: each expansion result is scored by the retrieval it enables.
        evaluation_results = list(map(lambda result: evaluate_one_query_expansion_node(
            retrieval_callables, retrieval_params, result['queries'].tolist(), retrieval_gt,
            general_strategy['metrics'], project_dir, previous_result), results))

        evaluation_df = pd.DataFrame({
            'filename': filenames,
            **{f'query_expansion_{metric_name}': list(map(lambda x: x[metric_name].mean(), evaluation_results))
               for metric_name in general_strategy['metrics']}
        })
        summary_df = pd.merge(on='filename', left=summary_df, right=evaluation_df, how='left')

        best_result, best_filename = select_best_average(evaluation_results, general_strategy['metrics'], filenames)
        # change metric name columns to query_expansion_metric_name
        best_result = best_result.rename(columns={
            metric_name: f'query_expansion_{metric_name}' for metric_name in strategies['metrics']})
        # Keep only expansion-level columns; retrieval columns were evaluation scaffolding.
        best_result = best_result.drop(columns=['retrieved_contents', 'retrieved_ids', 'retrieve_scores'])
    else:
        best_result, best_filename = results[0], filenames[0]

    best_result = pd.concat([previous_result, best_result], axis=1)

    # add 'is_best' column at summary file
    summary_df['is_best'] = summary_df['filename'] == best_filename

    # save files
    summary_df.to_csv(os.path.join(node_dir, "summary.csv"), index=False)
    best_result.to_parquet(os.path.join(node_dir, f"best_{os.path.splitext(best_filename)[0]}.parquet"),
                           index=False)
    return best_result


def evaluate_one_query_expansion_node(retrieval_funcs: List[Callable],
                                      retrieval_params: List[Dict],
                                      expanded_queries: List[List[str]],
                                      retrieval_gt: List[List[str]],
                                      metrics: List[str],
                                      project_dir,
                                      previous_result: pd.DataFrame) -> pd.DataFrame:
    """Score one expansion result by running every candidate retrieval over its
    expanded queries and keeping the best-average outcome.

    NOTE(review): this assigns previous_result['queries'] in place, mutating the
    caller's dataframe between evaluations — confirm this is intended.
    """
    previous_result['queries'] = expanded_queries
    retrieval_results = list(map(lambda x: x[0](project_dir=project_dir, previous_result=previous_result, **x[1]),
                                 zip(retrieval_funcs, retrieval_params)))
    evaluation_results = list(map(lambda x: evaluate_retrieval_node(x, retrieval_gt, metrics),
                                  retrieval_results))
    best_result, _ = select_best_average(evaluation_results, metrics)
    best_result = pd.concat([previous_result, best_result], axis=1)
    return best_result


def make_retrieval_callable_params(strategy_dict: Dict):
    """Expand the strategy's retrieval_modules into (callables, param combinations).

    strategy_dict looks like this:

    .. Code:: json

        {
            "metrics": ["retrieval_f1", "retrieval_recall"],
            "top_k": 50,
            "retrieval_modules": [
              {"module_type": "bm25"},
              {"module_type": "vectordb", "embedding_model": ["openai", "huggingface"]}
            ]
        }

    Defaults to a single bm25 module when retrieval_modules is absent.
    """
    node_dict = deepcopy(strategy_dict)
    retrieval_module_list: Optional[List[Dict]] = node_dict.pop('retrieval_modules', None)
    if retrieval_module_list is None:
        retrieval_module_list = [{
            'module_type': 'bm25',
        }]
    node_params = node_dict
    modules = list(map(lambda module_dict: get_support_modules(module_dict.pop('module_type')),
                       retrieval_module_list))
    param_combinations = list(map(lambda module_dict: make_combinations({**module_dict, **node_params}),
                                  retrieval_module_list))
    return explode(modules, param_combinations)
evocodebench_data_228
import os
import pathlib
from copy import deepcopy
from typing import List, Callable, Dict, Optional, Union

import pandas as pd

from autorag.evaluate import evaluate_generation
from autorag.evaluate.util import cast_metrics
from autorag.strategy import measure_speed, filter_by_threshold, select_best_average
from autorag.support import get_support_modules
from autorag.utils import validate_qa_dataset
from autorag.utils.util import make_combinations, explode


def run_prompt_maker_node(modules: List[Callable],
                          module_params: List[Dict],
                          previous_result: pd.DataFrame,
                          node_line_dir: str,
                          strategies: Dict,
                          ) -> pd.DataFrame:
    """
    Run prompt maker node.
    With this function, you can select the best prompt maker module.
    As default, when you can use only one module, the evaluation will be skipped.
    If you want to select the best prompt among modules, you can use strategies.
    When you use them, you must pass 'generator_modules' and its parameters at strategies,
    because it uses generator modules and generator metrics to evaluate this module.
    It is recommended to use one params and modules for evaluation,
    but you can use multiple params and modules for evaluation.
    When you don't set generator module at strategies, it will use the default generator module.
    The default generator module is llama_index_llm with openai gpt-3.5-turbo model.

    :param modules: Prompt maker modules to run.
    :param module_params: Prompt maker module parameters.
    :param previous_result: Previous result dataframe.
        Could be query expansion's best result or qa data.
    :param node_line_dir: This node line's directory.
    :param strategies: Strategies for prompt maker node.
    :return: The best result dataframe.
        It contains previous result columns and prompt maker's result columns which is 'prompts'.
    """
    if not os.path.exists(node_line_dir):
        os.makedirs(node_line_dir)
    node_dir = os.path.join(node_line_dir, "prompt_maker")
    if not os.path.exists(node_dir):
        os.makedirs(node_dir)
    # node_line_dir is <project_dir>/<trial>/<node_line>, so go up two levels.
    project_dir = pathlib.PurePath(node_line_dir).parent.parent

    # Run every prompt maker module, measuring wall-clock time per module.
    results, execution_times = zip(*map(lambda task: measure_speed(
        task[0], project_dir=project_dir, previous_result=previous_result, **task[1]),
        zip(modules, module_params)))
    # Normalize to per-row time so modules are comparable across dataset sizes.
    average_times = list(map(lambda x: x / len(results[0]), execution_times))

    # Save each module's raw result to its own parquet file.
    filepaths = list(map(lambda x: os.path.join(node_dir, f'{x}.parquet'), range(len(modules))))
    list(map(lambda x: x[0].to_parquet(x[1], index=False), zip(results, filepaths)))  # execute save to parquet
    filenames = list(map(lambda x: os.path.basename(x), filepaths))

    # Summary of what ran, with what params, and how fast.
    summary_df = pd.DataFrame({
        'filename': filenames,
        'module_name': list(map(lambda module: module.__name__, modules)),
        'module_params': module_params,
        'execution_time': average_times,
    })

    metric_names, metric_params = cast_metrics(strategies.get('metrics'))

    # Run evaluation only when there is more than one module to choose between.
    if len(modules) > 1:
        # Split strategies into general keys (metrics, speed_threshold) and the rest.
        general_key = ['metrics', 'speed_threshold']
        general_strategy = dict(filter(lambda x: x[0] in general_key, strategies.items()))
        extra_strategy = dict(filter(lambda x: x[0] not in general_key, strategies.items()))

        # First, filter out modules that are too slow, if a threshold is set.
        if general_strategy.get('speed_threshold') is not None:
            results, filenames = filter_by_threshold(results, average_times,
                                                     general_strategy['speed_threshold'], filenames)

        # Metrics are mandatory for choosing between multiple prompt makers.
        if metric_names is None or len(metric_names) <= 0:
            raise ValueError("You must set at least one metric for prompt maker evaluation.")

        # Resolve generator modules (default: llama_index_llm/openai) from the strategy.
        generator_callables, generator_params = make_generator_callable_params(extra_strategy)

        # Load generation ground truth from the project's qa dataset.
        qa_data = pd.read_parquet(os.path.join(project_dir, "data", "qa.parquet"))
        validate_qa_dataset(qa_data)
        generation_gt = qa_data['generation_gt'].tolist()
        generation_gt = list(map(lambda x: x.tolist(), generation_gt))

        # Evaluate each prompt maker result by generating answers and scoring them.
        evaluation_results = list(map(lambda result: evaluate_one_prompt_maker_node(
            generator_callables, generator_params, result['prompts'].tolist(),
            generation_gt, general_strategy['metrics'], project_dir), results))

        evaluation_df = pd.DataFrame({
            'filename': filenames,
            **{f'prompt_maker_{metric_name}': list(map(lambda x: x[metric_name].mean(), evaluation_results))
               for metric_name in metric_names}
        })
        summary_df = pd.merge(on='filename', left=summary_df, right=evaluation_df, how='left')

        best_result, best_filename = select_best_average(evaluation_results, metric_names, filenames)
        # Prefix metric columns so downstream nodes know which node produced them.
        best_result = best_result.rename(columns={
            metric_name: f'prompt_maker_{metric_name}' for metric_name in metric_names})
        best_result = best_result.drop(columns=['generated_texts'])
    else:
        best_result, best_filename = results[0], filenames[0]

    # Mark the winning module in the summary file.
    summary_df['is_best'] = summary_df['filename'] == best_filename

    best_result = pd.concat([previous_result, best_result], axis=1)

    # Persist summary and best result for this node.
    summary_df.to_csv(os.path.join(node_dir, "summary.csv"), index=False)
    best_result.to_parquet(os.path.join(node_dir,
                                        f"best_{os.path.splitext(best_filename)[0]}.parquet"), index=False)
    return best_result


def make_generator_callable_params(strategy_dict: Dict):
    """Resolve generator modules and their parameter grids from a strategy dict.

    Falls back to llama_index_llm with openai gpt-3.5-turbo when no
    'generator_modules' key is present. Returns the exploded
    (modules, parameter-combination) pair.
    """
    node_dict = deepcopy(strategy_dict)
    generator_module_list: Optional[List[Dict]] = node_dict.pop('generator_modules', None)
    if generator_module_list is None:
        generator_module_list = [{
            'module_type': 'llama_index_llm',
            'llm': 'openai',
            'model': 'gpt-3.5-turbo',
        }]
    # Everything left over applies to every generator module.
    node_params = node_dict
    modules = list(map(lambda module_dict: get_support_modules(module_dict.pop('module_type')),
                       generator_module_list))
    param_combinations = list(map(lambda module_dict: make_combinations({**module_dict, **node_params}),
                                  generator_module_list))
    return explode(modules, param_combinations)


def evaluate_one_prompt_maker_node(generator_funcs: List[Callable],
                                   generator_params: List[Dict],
                                   prompts: List[str],
                                   generation_gt: List[List[str]],
                                   metrics: Union[List[str], List[Dict]],
                                   project_dir) -> pd.DataFrame:
    """Generate answers from *prompts* with each generator and keep the best-scoring run.

    :return: Best generator run concatenated with the prompts
        (it has a 'generated_texts' column).
    """
    input_df = pd.DataFrame({'prompts': prompts})
    generator_results = list(map(lambda x: x[0](project_dir=project_dir, previous_result=input_df, **x[1]),
                                 zip(generator_funcs, generator_params)))
    evaluation_results = list(map(lambda x: evaluate_generator_result(x[0], generation_gt, metrics),
                                  zip(generator_results, generator_funcs)))
    # Metrics may be given as plain names or as {'metric_name': ..., ...} dicts.
    metric_names = list(map(lambda x: x['metric_name'], metrics)) if isinstance(metrics[0], dict) else metrics
    best_result, _ = select_best_average(evaluation_results, metric_names)
    best_result = pd.concat([input_df, best_result], axis=1)
    return best_result  # it has 'generated_texts' column


def evaluate_generator_result(result_df: pd.DataFrame,
                              generation_gt: List[List[str]],
                              metrics: Union[List[str], List[Dict]]) -> pd.DataFrame:
    """Score a generator's 'generated_texts' column against the ground truth."""
    @evaluate_generation(generation_gt=generation_gt, metrics=metrics)
    def evaluate(df):
        return df['generated_texts'].tolist()

    return evaluate(result_df)
evocodebench_data_229
import itertools
import logging
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Dict, List, Callable, Tuple

import pandas as pd

from autorag.schema.module import Module
from autorag.support import get_support_nodes
from autorag.utils.util import make_combinations, explode

logger = logging.getLogger("AutoRAG")


@dataclass
class Node:
    """One node of a node line: a node type, its strategy, node-level params,
    and the list of candidate modules to run and compare."""
    node_type: str
    strategy: Dict
    node_params: Dict
    modules: List[Module]
    # Resolved from node_type in __post_init__, not passed by the caller.
    run_node: Callable = field(init=False)

    def __post_init__(self):
        # Resolve the runner for this node type; fail fast if it is unknown.
        self.run_node = get_support_nodes(self.node_type)
        if self.run_node is None:
            raise ValueError(f"Node type {self.node_type} is not supported.")

    def get_param_combinations(self) -> Tuple[List[Callable], List[Dict]]:
        """
        This method returns a combination of module and node parameters, also corresponding modules.

        :return: Each module and its module parameters.
        :rtype: Tuple[List[Callable], List[Dict]]
        """
        def make_single_combination(module: Module) -> List[Dict]:
            # Module params override node params on key collision (later wins).
            input_dict = {**self.node_params, **module.module_param}
            return make_combinations(input_dict)

        combinations = list(map(make_single_combination, self.modules))
        # Repeat each module once per combination so the two lists align 1:1.
        module_list, combination_list = explode(self.modules, combinations)
        return list(map(lambda x: x.module, module_list)), combination_list

    @classmethod
    def from_dict(cls, node_dict: Dict) -> 'Node':
        """Build a Node from a config dict ('node_type', 'strategy', 'modules', rest = node params)."""
        # Deep-copy so the pops below do not mutate the caller's dict.
        _node_dict = deepcopy(node_dict)
        node_type = _node_dict.pop('node_type')
        strategy = _node_dict.pop('strategy')
        modules = list(map(lambda x: Module.from_dict(x), _node_dict.pop('modules')))
        # Whatever keys remain are node-level parameters shared by all modules.
        node_params = _node_dict
        return cls(node_type, strategy, node_params, modules)

    def run(self, previous_result: pd.DataFrame, node_line_dir: str) -> pd.DataFrame:
        """Run this node: expand module/param combinations and dispatch to run_node."""
        logger.info(f'Running node {self.node_type}...')
        input_modules, input_params = self.get_param_combinations()
        return self.run_node(modules=input_modules,
                             module_params=input_params,
                             previous_result=previous_result,
                             node_line_dir=node_line_dir,
                             strategies=self.strategy)


def extract_values(node: Node, key: str) -> List[str]:
    """
    This function extract values from node's modules' module_param.

    :param node: The node you want to extract values from.
    :param key: The key of module_param that you want to extract.
    :return: The list of extracted values.
        It removes duplicated elements automatically.
    """
    def extract_module_values(module: Module):
        if key not in module.module_param:
            return []
        value = module.module_param[key]
        if isinstance(value, str):
            return [value]
        elif isinstance(value, list):
            return value
        else:
            raise ValueError(f"{key} must be str or list, but got {type(value)}")

    values = list(map(extract_module_values, node.modules))
    # set() deduplicates; note that element order is not preserved.
    return list(set(list(itertools.chain.from_iterable(values))))


def extract_values_from_nodes(nodes: List[Node], key: str) -> List[str]:
    """
    This function extract values from nodes' modules' module_param.

    :param nodes: The nodes you want to extract values from.
    :param key: The key of module_param that you want to extract.
    :return: The list of extracted values.
        It removes duplicated elements automatically.
    """
    values = list(map(lambda node: extract_values(node, key), nodes))
    return list(set(list(itertools.chain.from_iterable(values))))


def module_type_exists(nodes: List[Node], module_type: str) -> bool:
    """
    This function check if the module type exists in the nodes.

    :param nodes: The nodes you want to check.
    :param module_type: The module type you want to check.
    :return: True if the module type exists in the nodes.
    """
    return any(list(map(lambda node: any(list(map(lambda module: module.module_type == module_type,
                                                  node.modules))),
                        nodes)))
evocodebench_data_230
import functools
import os
from typing import List, Optional

import evaluate
import sacrebleu
from llama_index.core.embeddings import BaseEmbedding
from openai import OpenAI

from autorag import embedding_models
from autorag.evaluate.metric.util import calculate_cosine_similarity


def generation_metric(func):
    """Decorator that lifts a per-sample metric ``func(gt_list, pred, **kwargs)``
    to a batched metric over parallel lists of ground truths and generations."""
    @functools.wraps(func)
    def wrapper(generation_gt: List[List[str]], generations: List[str], **kwargs) -> List[float]:
        """
        Compute generation metric.

        :param generation_gt: A list of ground truth.
            Must be 2-d list of string.
            Because it can be a multiple ground truth.
        :param generations: A list of generations that LLM generated.
        :param kwargs: The additional arguments for metric function.
        :return: A list of computed metric scores.
        """
        # Score each (ground-truth list, generation) pair independently.
        result = list(map(lambda x: func(x[0], x[1], **kwargs), zip(generation_gt, generations)))
        return result

    return wrapper


def huggingface_evaluate(instance, key: str,
                         generation_gt: List[List[str]], generations: List[str]) -> List[float]:
    """
    Compute huggingface evaluate metric.

    :param instance: The instance of huggingface evaluates metric.
    :param key: The key to retrieve result score from huggingface evaluate result.
    :param generation_gt: A list of ground truth.
        Must be 2-d list of string.
    :param generations: A list of generations that LLM generated.
    :return: The list of scores.
    """
    def compute_score(gt: List[str], pred: str) -> float:
        # Best score over the multiple ground truths for this prediction.
        return max(list(map(
            lambda x: instance.compute(predictions=[pred], references=[x])[key], gt)))

    result = list(map(lambda x: compute_score(x[0], x[1]), zip(generation_gt, generations)))
    return result


@generation_metric
def bleu(gt: List[str], pred: str) -> float:
    """
    Compute bleu score for generation.
    """
    # sacrebleu takes (hypothesis, list-of-references) and returns a BLEUScore.
    return sacrebleu.sentence_bleu(pred, gt).score


def meteor(generation_gt: List[List[str]], generations: List[str]) -> List[float]:
    """
    Compute meteor score for generation.

    :param generation_gt: A list of ground truth.
        Must be 2-d list of string.
        Because it can be a multiple ground truth.
    :param generations: A list of generations that LLM generated.
    :return: A list of computed metric scores.
    """
    meteor_instance = evaluate.load("meteor")
    return huggingface_evaluate(meteor_instance, 'meteor', generation_gt, generations)


def rouge(generation_gt: List[List[str]], generations: List[str]) -> List[float]:
    """
    Compute rouge score for generation.

    :param generation_gt: A list of ground truth.
        Must be 2-d list of string.
        Because it can be a multiple ground truth.
    :param generations: A list of generations that LLM generated.
    :return: A list of computed metric scores.
    """
    rouge_instance = evaluate.load("rouge")
    return huggingface_evaluate(rouge_instance, 'rougeL', generation_gt, generations)


@generation_metric
def sem_score(generation_gt: List[str], pred: str, embedding_model: Optional[BaseEmbedding] = None) -> float:
    """
    Compute sem score between generation gt and pred with cosine similarity.

    :param generation_gt: A list of ground truth.
        Must be list of string.
        It will get the max of cosine similarity between generation_gt and pred.
    :param pred: Model prediction.
    :param embedding_model: Embedding model to use for compute cosine similarity.
        Default is all-mpnet-base-v2 embedding model.
        The paper used this embedding model.
    :return: Sem score between generation_gt and pred.
    """
    if embedding_model is None:
        embedding_model = embedding_models['huggingface_all_mpnet_base_v2']

    gt_embeddings = embedding_model.get_text_embedding_batch(generation_gt)
    pred_embedding = embedding_model.get_text_embedding(pred)

    # calculate cosine similarity; take the best match over the multiple ground truths
    similarity_scores: List[float] = list(map(lambda x: calculate_cosine_similarity(x, pred_embedding),
                                              gt_embeddings))
    return max(similarity_scores)


@generation_metric
def g_eval(generation_gt: List[str], pred: str,
           metrics: Optional[List[str]] = None,
           model: str = 'gpt-4-0125-preview',
           ) -> float:
    """
    Calculate G-Eval score.
    G-eval is a metric that uses high-performance LLM model to evaluate generation performance.
    It evaluates the generation result by coherence, consistency, fluency, and relevance.
    It uses only 'openai' model, and we recommend to use gpt-4 for evaluation accuracy.

    :param generation_gt: A list of ground truth.
    :param pred: Model generation.
    :param metrics: A list of metrics to use for evaluation.
        Default is all metrics, which is ['coherence', 'consistency', 'fluency', 'relevance'].
    :param model: OpenAI model name.
        Default is 'gpt-4-0125-preview'.
    :return: G-Eval score.
    """
    available_metrics = ['coherence', 'consistency', 'fluency', 'relevance']
    if metrics is None:
        metrics = available_metrics
    else:
        assert len(metrics) > 0, "metrics must be a list of string"
        # Silently drop any metric name that is not supported.
        metrics = [metric for metric in metrics if metric in available_metrics]

    # Prompt templates live next to this file in g_eval_prompts/.
    current_path = os.path.dirname(os.path.realpath(__file__))
    prompt_path = os.path.join(current_path, 'g_eval_prompts')
    g_eval_prompts = {
        "coherence": open(os.path.join(prompt_path, "coh_detailed.txt")).read(),
        "consistency": open(os.path.join(prompt_path, "con_detailed.txt")).read(),
        "fluency": open(os.path.join(prompt_path, "flu_detailed.txt")).read(),
        "relevance": open(os.path.join(prompt_path, "rel_detailed.txt")).read(),
    }
    # NOTE(review): relies on OPENAI_API_KEY being configured in the environment.
    client = OpenAI()

    def g_eval_score(prompt: str, gen_gt: List[str], pred: str):
        # Score pred against each ground truth separately; keep the best.
        scores = []
        for gt in gen_gt:
            input_prompt = prompt.replace('{{Document}}', gt).replace('{{Summary}}', pred)
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": input_prompt}
                ],
                logprobs=True,
                top_logprobs=5,
                temperature=0,
                max_tokens=2,
                frequency_penalty=0,
                presence_penalty=0,
                stop=None,
                n=20,  # 20 samples are aggregated by get_g_eval_score below
            )
            # Prompts whose rating scale is 1-3 (fluency) use a 3-point max score.
            if '(1-3):' in prompt:
                scores.append(get_g_eval_score(response, max_score=3))
            else:
                scores.append(get_g_eval_score(response))
        return max(scores)

    def get_g_eval_score(responses, max_score: int = 5) -> int:
        # Accumulate a weight for each candidate score token '1'..str(max_score).
        target_tokens = {str(i): 0 for i in range(1, max_score + 1)}
        for choice in responses.choices:
            first_top_log_probs = choice.logprobs.content[0].top_logprobs
            # Weight by logprob rank: rank 0 adds 5, rank 4 adds 1.
            for i, top_log_prob in enumerate(list(map(lambda x: x.token, first_top_log_probs))):
                if top_log_prob in target_tokens:
                    target_tokens[top_log_prob] += (5 - i)

        # Return the score token with the largest accumulated weight.
        return int(max(target_tokens, key=target_tokens.get))

    g_eval_scores = list(map(lambda x: g_eval_score(g_eval_prompts[x], generation_gt, pred), metrics))
    return sum(g_eval_scores) / len(g_eval_scores)
evocodebench_data_231
from __future__ import annotations import logging import os import torch from modules import ( devices, errors, face_restoration, face_restoration_utils, modelloader, shared, ) logger = logging.getLogger(__name__) model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" model_download_name = "GFPGANv1.4.pth" gfpgan_face_restorer: face_restoration.FaceRestoration | None = None class FaceRestorerGFPGAN(face_restoration_utils.CommonFaceRestoration): def name(self): return "GFPGAN" def get_device(self): return devices.device_gfpgan def load_net(self) -> torch.Module: for model_path in modelloader.load_models( model_path=self.model_path, model_url=model_url, command_path=self.model_path, download_name=model_download_name, ext_filter=['.pth'], ): if 'GFPGAN' in os.path.basename(model_path): model = modelloader.load_spandrel_model( model_path, device=self.get_device(), expected_architecture='GFPGAN', ).model model.different_w = True # see https://github.com/chaiNNer-org/spandrel/pull/81 return model raise ValueError("No GFPGAN model found") def restore(self, np_image): def restore_face(cropped_face_t): assert self.net is not None return self.net(cropped_face_t, return_rgb=False)[0] return self.restore_with_helper(np_image, restore_face) def gfpgan_fix_faces(np_image): if gfpgan_face_restorer: return gfpgan_face_restorer.restore(np_image) logger.warning("GFPGAN face restorer not set up") return np_image def setup_model(dirname: str) -> None: global gfpgan_face_restorer try: face_restoration_utils.patch_facexlib(dirname) gfpgan_face_restorer = FaceRestorerGFPGAN(model_path=dirname) shared.face_restorers.append(gfpgan_face_restorer) except Exception: errors.report("Error setting up GFPGAN", exc_info=True)
evocodebench_data_232
from __future__ import annotations import logging import torch from modules import ( devices, errors, face_restoration, face_restoration_utils, modelloader, shared, ) logger = logging.getLogger(__name__) model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' model_download_name = 'codeformer-v0.1.0.pth' # used by e.g. postprocessing_codeformer.py codeformer: face_restoration.FaceRestoration | None = None class FaceRestorerCodeFormer(face_restoration_utils.CommonFaceRestoration): def name(self): return "CodeFormer" def load_net(self) -> torch.Module: for model_path in modelloader.load_models( model_path=self.model_path, model_url=model_url, command_path=self.model_path, download_name=model_download_name, ext_filter=['.pth'], ): return modelloader.load_spandrel_model( model_path, device=devices.device_codeformer, expected_architecture='CodeFormer', ).model raise ValueError("No codeformer model found") def get_device(self): return devices.device_codeformer def restore(self, np_image, w: float | None = None): if w is None: w = getattr(shared.opts, "code_former_weight", 0.5) def restore_face(cropped_face_t): assert self.net is not None return self.net(cropped_face_t, w=w, adain=True)[0] return self.restore_with_helper(np_image, restore_face) def setup_model(dirname: str) -> None: global codeformer try: codeformer = FaceRestorerCodeFormer(dirname) shared.face_restorers.append(codeformer) except Exception: errors.report("Error setting up CodeFormer", exc_info=True)
evocodebench_data_233
from __future__ import annotations import logging import os import torch from modules import ( devices, errors, face_restoration, face_restoration_utils, modelloader, shared, ) logger = logging.getLogger(__name__) model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" model_download_name = "GFPGANv1.4.pth" gfpgan_face_restorer: face_restoration.FaceRestoration | None = None class FaceRestorerGFPGAN(face_restoration_utils.CommonFaceRestoration): def name(self): return "GFPGAN" def get_device(self): return devices.device_gfpgan def load_net(self) -> torch.Module: for model_path in modelloader.load_models( model_path=self.model_path, model_url=model_url, command_path=self.model_path, download_name=model_download_name, ext_filter=['.pth'], ): if 'GFPGAN' in os.path.basename(model_path): model = modelloader.load_spandrel_model( model_path, device=self.get_device(), expected_architecture='GFPGAN', ).model model.different_w = True # see https://github.com/chaiNNer-org/spandrel/pull/81 return model raise ValueError("No GFPGAN model found") def restore(self, np_image): def restore_face(cropped_face_t): assert self.net is not None return self.net(cropped_face_t, return_rgb=False)[0] return self.restore_with_helper(np_image, restore_face) def gfpgan_fix_faces(np_image): if gfpgan_face_restorer: return gfpgan_face_restorer.restore(np_image) logger.warning("GFPGAN face restorer not set up") return np_image def setup_model(dirname: str) -> None: global gfpgan_face_restorer try: face_restoration_utils.patch_facexlib(dirname) gfpgan_face_restorer = FaceRestorerGFPGAN(model_path=dirname) shared.face_restorers.append(gfpgan_face_restorer) except Exception: errors.report("Error setting up GFPGAN", exc_info=True)
evocodebench_data_234
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quaternion math.

This module assumes the xyzw quaternion format where xyz is the imaginary part
and w is the real part.

Functions in this module support both batched and unbatched quaternions.

Some parts have been adapted from Ceres.
"""

from internal import spin_math
from jax import numpy as jnp
from jax.numpy import linalg


def _safe_sqrt(x):
  """safe_sqrt with the value at zero set to eps to avoid divide by zero."""
  return spin_math.safe_sqrt(x, value_at_zero=jnp.finfo(jnp.float32).eps)


def im(q):
  """Fetch the imaginary part of the quaternion."""
  return q[Ellipsis, :3]


def re(q):
  """Fetch the real part of the quaternion."""
  return q[Ellipsis, 3:]


def identity():
  """Return the identity quaternion (zero rotation)."""
  return jnp.array([0.0, 0.0, 0.0, 1.0])


def conjugate(q):
  """Compute the conjugate of a quaternion (negated imaginary part)."""
  return jnp.concatenate([-im(q), re(q)], axis=-1)


def inverse(q):
  """Compute the inverse of a quaternion."""
  return normalize(conjugate(q))


def normalize(q):
  """Normalize a quaternion to unit norm."""
  return q / norm(q)


def norm(q):
  """Return the quaternion norm, keeping the last axis."""
  return linalg.norm(q, axis=-1, keepdims=True)


def multiply(q1, q2):
  """Multiply two quaternions (Hamilton product, xyzw layout)."""
  c = re(q1) * im(q2) + re(q2) * im(q1) + jnp.cross(im(q1), im(q2))
  # NOTE(review): jnp.dot performs matrix contraction on 2-D inputs, so this
  # line looks correct only for unbatched quaternions -- confirm before using
  # this function with batches.
  w = re(q1) * re(q2) - jnp.dot(im(q1), im(q2))
  return jnp.concatenate([c, w], axis=-1)


def rotate(q, v):
  """Rotate a vector using a quaternion: im(q * (v, 0) * conj(q))."""
  # Create the quaternion representation of the vector.
  q_v = jnp.concatenate([v, jnp.zeros_like(v[Ellipsis, :1])], axis=-1)
  return im(multiply(multiply(q, q_v), conjugate(q)))


def log(q, eps=1e-8):
  """Computes the quaternion logarithm.

  References:
    https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions

  Args:
    q: the quaternion in (x,y,z,w) format.
    eps: an epsilon value for numerical stability.

  Returns:
    The logarithm of q.
  """
  mag = linalg.norm(q, axis=-1, keepdims=True)
  v = im(q)
  s = re(q)
  w = jnp.log(mag)
  # Clamp the denominator so pure-real quaternions do not divide by zero.
  denom = jnp.maximum(
      linalg.norm(v, axis=-1, keepdims=True), eps * jnp.ones_like(v)
  )
  # FIX: the rotation angle is acos(s / |q|).  The previous revision computed
  # acos(s / eps), which sends the acos argument far outside [-1, 1] for any
  # non-tiny real part.
  xyz = v / denom * spin_math.safe_acos(s / mag)
  return jnp.concatenate((xyz, w), axis=-1)


def exp(q, eps=1e-8):
  """Computes the quaternion exponential.

  References:
    https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions

  Args:
    q: the quaternion in (x,y,z,w) format or (x,y,z) if is_pure is True.
    eps: an epsilon value for numerical stability.

  Returns:
    The exponential of q.
  """
  # A 3-vector is treated as a pure quaternion (zero real part).
  is_pure = q.shape[-1] == 3
  if is_pure:
    s = jnp.zeros_like(q[Ellipsis, -1:])
    v = q
  else:
    v = im(q)
    s = re(q)

  norm_v = linalg.norm(v, axis=-1, keepdims=True)
  exp_s = jnp.exp(s)
  w = jnp.cos(norm_v)
  xyz = jnp.sin(norm_v) * v / jnp.maximum(norm_v, eps * jnp.ones_like(norm_v))
  return exp_s * jnp.concatenate((xyz, w), axis=-1)


def to_rotation_matrix(q):
  """Constructs a rotation matrix from a quaternion.

  Args:
    q: a (*,4) array containing quaternions.

  Returns:
    A (*,3,3) array containing rotation matrices.
  """
  x, y, z, w = jnp.split(q, 4, axis=-1)
  # s = 1/|q|^2 normalizes non-unit quaternions in the formula below.
  # NOTE(review): s is computed without keepdims while x,y,z,w keep a trailing
  # axis -- broadcasting looks safe only for unbatched input; confirm for
  # batched use.
  s = 1.0 / jnp.sum(q**2, axis=-1)
  return jnp.stack(
      [
          jnp.stack(
              [
                  1 - 2 * s * (y**2 + z**2),
                  2 * s * (x * y - z * w),
                  2 * s * (x * z + y * w),
              ],
              axis=0,
          ),
          jnp.stack(
              [
                  2 * s * (x * y + z * w),
                  1 - s * 2 * (x**2 + z**2),
                  2 * s * (y * z - x * w),
              ],
              axis=0,
          ),
          jnp.stack(
              [
                  2 * s * (x * z - y * w),
                  2 * s * (y * z + x * w),
                  1 - 2 * s * (x**2 + y**2),
              ],
              axis=0,
          ),
      ],
      axis=0,
  )


def from_rotation_matrix(m, eps=1e-9):
  """Construct quaternion from a rotation matrix.

  Args:
    m: a (*,3,3) array containing rotation matrices.
    eps: a small number for numerical stability.

  Returns:
    A (*,4) array containing quaternions.
  """
  trace = jnp.trace(m)
  m00 = m[Ellipsis, 0, 0]
  m01 = m[Ellipsis, 0, 1]
  m02 = m[Ellipsis, 0, 2]
  m10 = m[Ellipsis, 1, 0]
  m11 = m[Ellipsis, 1, 1]
  m12 = m[Ellipsis, 1, 2]
  m20 = m[Ellipsis, 2, 0]
  m21 = m[Ellipsis, 2, 1]
  m22 = m[Ellipsis, 2, 2]

  # The four branches below are the standard numerically-stable cases keyed on
  # which of (trace, m00, m11, m22) is largest; all are evaluated and selected
  # with jnp.where so the function stays jittable.
  def tr_positive():
    sq = _safe_sqrt(trace + 1.0) * 2.0  # sq = 4 * w.
    w = 0.25 * sq
    x = jnp.divide(m21 - m12, sq)
    y = jnp.divide(m02 - m20, sq)
    z = jnp.divide(m10 - m01, sq)
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_1():
    sq = _safe_sqrt(1.0 + m00 - m11 - m22 + eps) * 2.0  # sq = 4 * x.
    w = jnp.divide(m21 - m12, sq)
    x = 0.25 * sq
    y = jnp.divide(m01 + m10, sq)
    z = jnp.divide(m02 + m20, sq)
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_2():
    sq = _safe_sqrt(1.0 + m11 - m00 - m22 + eps) * 2.0  # sq = 4 * y.
    w = jnp.divide(m02 - m20, sq)
    x = jnp.divide(m01 + m10, sq)
    y = 0.25 * sq
    z = jnp.divide(m12 + m21, sq)
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_3():
    sq = _safe_sqrt(1.0 + m22 - m00 - m11 + eps) * 2.0  # sq = 4 * z.
    w = jnp.divide(m10 - m01, sq)
    x = jnp.divide(m02 + m20, sq)
    y = jnp.divide(m12 + m21, sq)
    z = 0.25 * sq
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_idx(cond):
    # Broadcast the scalar/batched condition over the 4 quaternion components.
    cond = jnp.expand_dims(cond, -1)
    cond = jnp.tile(cond, [1] * (len(m.shape) - 2) + [4])
    return cond

  where_2 = jnp.where(cond_idx(m11 > m22), cond_2(), cond_3())
  where_1 = jnp.where(cond_idx((m00 > m11) & (m00 > m22)), cond_1(), where_2)
  return jnp.where(cond_idx(trace > 0), tr_positive(), where_1)


def from_axis_angle(
    axis_angle, eps=jnp.finfo(jnp.float32).eps
):
  """Constructs a quaternion for the given axis/angle rotation.

  Args:
    axis_angle: A 3-vector where the direction is the axis of rotation and the
      magnitude is the angle of rotation.
    eps: A small number used for numerical stability around zero rotations.

  Returns:
    A quaternion encoding the same rotation.
  """
  theta_squared = jnp.sum(axis_angle**2, axis=-1)
  theta = _safe_sqrt(theta_squared)
  half_theta = theta / 2.0
  k = jnp.sin(half_theta) / theta
  # Avoid evaluating sqrt when theta is close to zero: sin(t/2)/t -> 1/2.
  k = jnp.where(theta_squared > eps**2, k, 0.5)
  qw = jnp.where(theta_squared > eps**2, jnp.cos(half_theta), 1.0)
  # NOTE(review): indexes the leading axis, so this supports unbatched
  # 3-vectors only -- confirm before batched use.
  qx = axis_angle[0] * k
  qy = axis_angle[1] * k
  qz = axis_angle[2] * k

  return jnp.squeeze(jnp.array([qx, qy, qz, qw]))


def to_axis_angle(
    q, eps=jnp.finfo(jnp.float32).eps
):
  """Converts a quaternion to an axis-angle representation.

  Args:
    q: a 4-vector representing a unit quaternion.
    eps: A small number used for numerical stability around zero rotations.

  Returns:
    A 3-vector where the direction is the axis of rotation and the magnitude
      is the angle of rotation.
  """
  sin_sq_theta = jnp.sum(im(q) ** 2, axis=-1)
  sin_theta = _safe_sqrt(sin_sq_theta)
  cos_theta = re(q)

  # If cos_theta is negative, theta is greater than pi/2, which
  # means that angle for the angle_axis vector which is 2 * theta
  # would be greater than pi.
  #
  # While this will result in the correct rotation, it does not
  # result in a normalized angle-axis vector.
  #
  # In that case we observe that 2 * theta ~ 2 * theta - 2 * pi,
  # which is equivalent saying
  #
  #   theta - pi = atan(sin(theta - pi), cos(theta - pi))
  #              = atan(-sin(theta), -cos(theta))
  two_theta = 2.0 * jnp.where(
      cos_theta < 0.0,
      jnp.arctan2(-sin_theta, -cos_theta),
      jnp.arctan2(sin_theta, cos_theta),
  )

  # For zero rotation, sqrt() will produce NaN in the derivative since
  # the argument is zero. We avoid this by directly returning the value in
  # such cases.
  k = jnp.where(sin_sq_theta > eps**2, two_theta / sin_theta, 2.0)
  return im(q) * k
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quaternion math.

This module assumes the xyzw quaternion format where xyz is the imaginary part
and w is the real part.

Functions in this module support both batched and unbatched quaternions.

Some parts have been adapted from Ceres.
"""

from internal import spin_math
from jax import numpy as jnp
from jax.numpy import linalg


def _safe_sqrt(x):
  """safe_sqrt with the value at zero set to eps to avoid divide by zero."""
  return spin_math.safe_sqrt(x, value_at_zero=jnp.finfo(jnp.float32).eps)


def im(q):
  """Fetch the imaginary part of the quaternion."""
  return q[Ellipsis, :3]


def re(q):
  """Fetch the real part of the quaternion."""
  return q[Ellipsis, 3:]


def identity():
  """Return the identity quaternion (zero rotation)."""
  return jnp.array([0.0, 0.0, 0.0, 1.0])


def conjugate(q):
  """Compute the conjugate of a quaternion (negated imaginary part)."""
  return jnp.concatenate([-im(q), re(q)], axis=-1)


def inverse(q):
  """Compute the inverse of a quaternion."""
  return normalize(conjugate(q))


def normalize(q):
  """Normalize a quaternion to unit norm."""
  return q / norm(q)


def norm(q):
  """Return the quaternion norm, keeping the last axis."""
  return linalg.norm(q, axis=-1, keepdims=True)


def multiply(q1, q2):
  """Multiply two quaternions (Hamilton product, xyzw layout)."""
  c = re(q1) * im(q2) + re(q2) * im(q1) + jnp.cross(im(q1), im(q2))
  # NOTE(review): jnp.dot performs matrix contraction on 2-D inputs, so this
  # line looks correct only for unbatched quaternions -- confirm before using
  # this function with batches.
  w = re(q1) * re(q2) - jnp.dot(im(q1), im(q2))
  return jnp.concatenate([c, w], axis=-1)


def rotate(q, v):
  """Rotate a vector using a quaternion: im(q * (v, 0) * conj(q))."""
  # Create the quaternion representation of the vector.
  q_v = jnp.concatenate([v, jnp.zeros_like(v[Ellipsis, :1])], axis=-1)
  return im(multiply(multiply(q, q_v), conjugate(q)))


def log(q, eps=1e-8):
  """Computes the quaternion logarithm.

  References:
    https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions

  Args:
    q: the quaternion in (x,y,z,w) format.
    eps: an epsilon value for numerical stability.

  Returns:
    The logarithm of q.
  """
  mag = linalg.norm(q, axis=-1, keepdims=True)
  v = im(q)
  s = re(q)
  w = jnp.log(mag)
  # Clamp the denominator so pure-real quaternions do not divide by zero.
  denom = jnp.maximum(
      linalg.norm(v, axis=-1, keepdims=True), eps * jnp.ones_like(v)
  )
  # FIX: the rotation angle is acos(s / |q|).  The previous revision computed
  # acos(s / eps), which sends the acos argument far outside [-1, 1] for any
  # non-tiny real part.
  xyz = v / denom * spin_math.safe_acos(s / mag)
  return jnp.concatenate((xyz, w), axis=-1)


def exp(q, eps=1e-8):
  """Computes the quaternion exponential.

  References:
    https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions

  Args:
    q: the quaternion in (x,y,z,w) format or (x,y,z) if is_pure is True.
    eps: an epsilon value for numerical stability.

  Returns:
    The exponential of q.
  """
  # A 3-vector is treated as a pure quaternion (zero real part).
  is_pure = q.shape[-1] == 3
  if is_pure:
    s = jnp.zeros_like(q[Ellipsis, -1:])
    v = q
  else:
    v = im(q)
    s = re(q)

  norm_v = linalg.norm(v, axis=-1, keepdims=True)
  exp_s = jnp.exp(s)
  w = jnp.cos(norm_v)
  xyz = jnp.sin(norm_v) * v / jnp.maximum(norm_v, eps * jnp.ones_like(norm_v))
  return exp_s * jnp.concatenate((xyz, w), axis=-1)


def to_rotation_matrix(q):
  """Constructs a rotation matrix from a quaternion.

  Args:
    q: a (*,4) array containing quaternions.

  Returns:
    A (*,3,3) array containing rotation matrices.
  """
  x, y, z, w = jnp.split(q, 4, axis=-1)
  # s = 1/|q|^2 normalizes non-unit quaternions in the formula below.
  # NOTE(review): s is computed without keepdims while x,y,z,w keep a trailing
  # axis -- broadcasting looks safe only for unbatched input; confirm for
  # batched use.
  s = 1.0 / jnp.sum(q**2, axis=-1)
  return jnp.stack(
      [
          jnp.stack(
              [
                  1 - 2 * s * (y**2 + z**2),
                  2 * s * (x * y - z * w),
                  2 * s * (x * z + y * w),
              ],
              axis=0,
          ),
          jnp.stack(
              [
                  2 * s * (x * y + z * w),
                  1 - s * 2 * (x**2 + z**2),
                  2 * s * (y * z - x * w),
              ],
              axis=0,
          ),
          jnp.stack(
              [
                  2 * s * (x * z - y * w),
                  2 * s * (y * z + x * w),
                  1 - 2 * s * (x**2 + y**2),
              ],
              axis=0,
          ),
      ],
      axis=0,
  )


def from_rotation_matrix(m, eps=1e-9):
  """Construct quaternion from a rotation matrix.

  Args:
    m: a (*,3,3) array containing rotation matrices.
    eps: a small number for numerical stability.

  Returns:
    A (*,4) array containing quaternions.
  """
  trace = jnp.trace(m)
  m00 = m[Ellipsis, 0, 0]
  m01 = m[Ellipsis, 0, 1]
  m02 = m[Ellipsis, 0, 2]
  m10 = m[Ellipsis, 1, 0]
  m11 = m[Ellipsis, 1, 1]
  m12 = m[Ellipsis, 1, 2]
  m20 = m[Ellipsis, 2, 0]
  m21 = m[Ellipsis, 2, 1]
  m22 = m[Ellipsis, 2, 2]

  # The four branches below are the standard numerically-stable cases keyed on
  # which of (trace, m00, m11, m22) is largest; all are evaluated and selected
  # with jnp.where so the function stays jittable.
  def tr_positive():
    sq = _safe_sqrt(trace + 1.0) * 2.0  # sq = 4 * w.
    w = 0.25 * sq
    x = jnp.divide(m21 - m12, sq)
    y = jnp.divide(m02 - m20, sq)
    z = jnp.divide(m10 - m01, sq)
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_1():
    sq = _safe_sqrt(1.0 + m00 - m11 - m22 + eps) * 2.0  # sq = 4 * x.
    w = jnp.divide(m21 - m12, sq)
    x = 0.25 * sq
    y = jnp.divide(m01 + m10, sq)
    z = jnp.divide(m02 + m20, sq)
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_2():
    sq = _safe_sqrt(1.0 + m11 - m00 - m22 + eps) * 2.0  # sq = 4 * y.
    w = jnp.divide(m02 - m20, sq)
    x = jnp.divide(m01 + m10, sq)
    y = 0.25 * sq
    z = jnp.divide(m12 + m21, sq)
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_3():
    sq = _safe_sqrt(1.0 + m22 - m00 - m11 + eps) * 2.0  # sq = 4 * z.
    w = jnp.divide(m10 - m01, sq)
    x = jnp.divide(m02 + m20, sq)
    y = jnp.divide(m12 + m21, sq)
    z = 0.25 * sq
    return jnp.stack((x, y, z, w), axis=-1)

  def cond_idx(cond):
    # Broadcast the scalar/batched condition over the 4 quaternion components.
    cond = jnp.expand_dims(cond, -1)
    cond = jnp.tile(cond, [1] * (len(m.shape) - 2) + [4])
    return cond

  where_2 = jnp.where(cond_idx(m11 > m22), cond_2(), cond_3())
  where_1 = jnp.where(cond_idx((m00 > m11) & (m00 > m22)), cond_1(), where_2)
  return jnp.where(cond_idx(trace > 0), tr_positive(), where_1)


def from_axis_angle(
    axis_angle, eps=jnp.finfo(jnp.float32).eps
):
  """Constructs a quaternion for the given axis/angle rotation.

  Args:
    axis_angle: A 3-vector where the direction is the axis of rotation and the
      magnitude is the angle of rotation.
    eps: A small number used for numerical stability around zero rotations.

  Returns:
    A quaternion encoding the same rotation.
  """
  theta_squared = jnp.sum(axis_angle**2, axis=-1)
  theta = _safe_sqrt(theta_squared)
  half_theta = theta / 2.0
  k = jnp.sin(half_theta) / theta
  # Avoid evaluating sqrt when theta is close to zero: sin(t/2)/t -> 1/2.
  k = jnp.where(theta_squared > eps**2, k, 0.5)
  qw = jnp.where(theta_squared > eps**2, jnp.cos(half_theta), 1.0)
  # NOTE(review): indexes the leading axis, so this supports unbatched
  # 3-vectors only -- confirm before batched use.
  qx = axis_angle[0] * k
  qy = axis_angle[1] * k
  qz = axis_angle[2] * k

  return jnp.squeeze(jnp.array([qx, qy, qz, qw]))


def to_axis_angle(
    q, eps=jnp.finfo(jnp.float32).eps
):
  """Converts a quaternion to an axis-angle representation.

  Args:
    q: a 4-vector representing a unit quaternion.
    eps: A small number used for numerical stability around zero rotations.

  Returns:
    A 3-vector where the direction is the axis of rotation and the magnitude
      is the angle of rotation.
  """
  sin_sq_theta = jnp.sum(im(q) ** 2, axis=-1)
  sin_theta = _safe_sqrt(sin_sq_theta)
  cos_theta = re(q)

  # If cos_theta is negative, theta is greater than pi/2, which
  # means that angle for the angle_axis vector which is 2 * theta
  # would be greater than pi.
  #
  # While this will result in the correct rotation, it does not
  # result in a normalized angle-axis vector.
  #
  # In that case we observe that 2 * theta ~ 2 * theta - 2 * pi,
  # which is equivalent saying
  #
  #   theta - pi = atan(sin(theta - pi), cos(theta - pi))
  #              = atan(-sin(theta), -cos(theta))
  two_theta = 2.0 * jnp.where(
      cos_theta < 0.0,
      jnp.arctan2(-sin_theta, -cos_theta),
      jnp.arctan2(sin_theta, cos_theta),
  )

  # For zero rotation, sqrt() will produce NaN in the derivative since
  # the argument is zero. We avoid this by directly returning the value in
  # such cases.
  k = jnp.where(sin_sq_theta > eps**2, two_theta / sin_theta, 2.0)
  return im(q) * k
evocodebench_data_236
"""Logprob extraction from top-k / argmax-only language-model APIs."""

import tqdm
import tiktoken
import numpy as np
from scipy.special import logsumexp
import math
from functools import partial, reduce
from operator import or_ as union
from typing import Literal, Optional
from concurrent.futures import ThreadPoolExecutor

from openlogprobs.models import Model
from openlogprobs.utils import batched


def exact_solve(
    model: Model,
    prefix: str,
    idx: list[int],
    bias: float = 5.0,
    top_logprob: Optional[float] = None,
) -> tuple[dict[int, float], set[int], int]:
    """Parallel exact solve based on https://mattf1n.github.io/openlogprobs.html

    Biases all tokens in `idx` upward by `bias` and recovers their unbiased
    log-probabilities from the biased top-k results.

    Args:
        model: API wrapper exposing `topk(prefix, logit_bias)`.
        prefix: the prompt to query.
        idx: token ids to solve for in this batch.
        bias: logit bias applied to every token in `idx`.
        top_logprob: the top *unbiased* logprob; required to recover partial
            results when some tokens still fail to enter the top-k.

    Returns:
        A tuple of (solved {token: logprob}, token ids that failed to enter
        the top-k, number of API calls made).

    Raises:
        TypeError: some tokens missed the top-k and `top_logprob` was not
            provided.
    """
    logit_bias = {i: bias for i in idx}
    topk_words = model.topk(prefix, logit_bias)
    if all(i in topk_words for i in idx):
        # All biased tokens made the top-k: invert the biased softmax exactly.
        biased_logprobs = np.array([topk_words[i] for i in idx])
        log_biased_prob = logsumexp(biased_logprobs)
        logprobs = biased_logprobs - np.logaddexp(
            bias + np.log1p(-np.exp(log_biased_prob)), log_biased_prob
        )
        return dict(zip(idx, logprobs)), set(), 1
    else:
        if top_logprob is None:
            missing_tokens = set(idx) - set(topk_words)
            # Fixed: the two concatenated literals previously joined with no
            # separating space ("...bias 5.0.Either increase...").
            raise TypeError(
                f"Tokens {missing_tokens} not in top-k with bias {bias}. "
                "Either increase bias or provide top unbiased logprob (top_logprob)"
            )
        success_idxs = list(i for i in idx if i in topk_words)
        fail_idxs = set(idx) - set(topk_words)
        # Highest biased logprob among tokens we did NOT bias, used as a
        # reference point to de-bias the successful tokens.
        biased_top_logprob = max(
            logprob for i, logprob in topk_words.items() if i not in idx
        )
        biased_logprobs = np.array([topk_words[i] for i in success_idxs])
        logprobs = biased_logprobs - biased_top_logprob + top_logprob - bias
        return dict(zip(success_idxs, logprobs)), fail_idxs, 1


def bisection_search(
    model: Model, prefix: str, idx: int, k=1, low=0, high=32, eps=1e-8
):
    """Estimate the logit gap of token `idx` via argmax-only bisection.

    Finds (to within `eps`) the smallest bias that makes `idx` the argmax;
    the negated bias is the token's logit relative to the current argmax.

    Returns:
        A tuple of (negated bias estimate, number of API calls made), where
        each argmax query is counted as `k` calls.
    """
    # check if idx is the argmax
    num_calls = k
    if model.argmax(prefix) == idx:
        return 0, num_calls

    # initialize high: double the bias until idx wins the argmax.
    logit_bias = {idx: high}
    while model.argmax(prefix, logit_bias) != idx:
        logit_bias[idx] *= 2
        num_calls += k
    high = logit_bias[idx]

    # improve estimate by bisecting [low, high] on the argmax outcome.
    mid = (high + low) / 2
    while high >= low + eps:
        logit_bias[idx] = mid
        if model.argmax(prefix, logit_bias) == idx:
            high = mid
        else:
            low = mid
        mid = (high + low) / 2
        num_calls += k
    return -mid, num_calls


def topk_search(model: Model, prefix: str, idx: int, k=1, high=40):
    """Recover the logprob of token `idx` using top-k queries with a bias.

    Returns:
        A tuple of (logprob estimate for `idx`, number of API calls made).
    """
    # get raw topk, could be done outside and passed in
    topk_words = model.topk(prefix)
    highest_idx = list(topk_words.keys())[np.argmax(list(topk_words.values()))]
    if idx == highest_idx:
        return topk_words[idx], k
    num_calls = k

    # initialize high: double the bias until idx becomes the argmax.
    logit_bias = {idx: high}
    new_max_idx = model.argmax(prefix, logit_bias)
    num_calls += k
    while new_max_idx != idx:
        logit_bias[idx] *= 2
        new_max_idx = model.argmax(prefix, logit_bias)
        num_calls += k
    high = logit_bias[idx]

    output = model.topk(prefix, logit_bias)
    num_calls += k

    # compute normalizing constant from the shift of the unbiased argmax.
    diff = topk_words[highest_idx] - output[highest_idx]
    logZ = high - math.log(math.exp(diff) - 1)
    fv = output[idx] + math.log(math.exp(logZ) + math.exp(high)) - high
    logprob = fv - logZ
    return logprob, num_calls


def extract_logprobs(
    model: Model,
    prefix: str,
    method: Literal["bisection", "topk", "exact"] = "bisection",
    k: int = 5,
    eps: float = 1e-6,
    multithread: bool = False,
    bias: float = 5.0,
    parallel: bool = False,
):
    """Extract the full next-token logprob vector for `prefix`.

    Args:
        model: API wrapper with `topk`, `argmax`, and `vocab_size`.
        prefix: the prompt to query.
        method: "exact" (batched exact solve), "topk", or "bisection".
        k: batch size ("exact") or per-call cost multiplier (other methods).
        eps: tolerance; NOTE(review): currently unused by this function.
        multithread: fan queries out over an 8-worker thread pool.
        bias: initial logit bias for the "exact" method.
        parallel: NOTE(review): currently unused by this function.

    Returns:
        A tuple of (np.array of logprobs over the vocab, total API calls).
    """
    vocab_size = model.vocab_size
    if method == "exact":
        logprob_dict = model.topk(prefix)
        top_logprob = max(logprob_dict.values())
        # Start with a bias large enough to lift any top-k token past the rest.
        bias += top_logprob - min(logprob_dict.values())
        remaining = set(range(vocab_size)) - set(logprob_dict)
        total_calls = 0
        if multithread:
            executor = ThreadPoolExecutor(max_workers=8)
            map_func = executor.map
        else:
            map_func = map
        # Repeatedly solve batches, escalating the bias for tokens that
        # failed to enter the top-k on the previous round.
        while remaining:
            search_results = map_func(
                partial(
                    exact_solve,
                    model,
                    prefix,
                    bias=bias,
                    top_logprob=top_logprob,
                ),
                batched(remaining, k),
            )
            logprob_dicts, skipped, calls = zip(*search_results)
            logprob_dict |= reduce(union, logprob_dicts)
            remaining = set.union(*skipped)
            total_calls += sum(calls)
            bias += 5
        if multithread:
            executor.shutdown()
        logprobs = np.array([logprob_dict[i] for i in range(vocab_size)])
        return logprobs, total_calls
    else:
        search_func = topk_search if method == "topk" else bisection_search
        search = partial(search_func, model, prefix, k=k)
        vocab = list(range(vocab_size))
        if multithread:
            with ThreadPoolExecutor(max_workers=8) as executor:
                search_results = executor.map(search, tqdm.tqdm(vocab))
        else:
            search_results = map(search, tqdm.tqdm(vocab))
        logit_list, calls = zip(*search_results)
        logits = np.array(logit_list)
        # Normalize recovered logits into logprobs.
        return logits - logsumexp(logits), sum(calls)
evocodebench_data_237
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""JAX resample implementations."""

import functools
import jax
import jax.numpy as jnp
import numpy as np


def gather_volume(data, locations, coordinate_order='xyz'):
  """Gather from data at locations.

  Args:
    data: A [D, H, W, C] tensor.
    locations: A [D, ..., 3] int32 tensor containing the locations to sample
      at.
    coordinate_order: Whether the sample locations are x,y,z or z,y,x.

  Returns:
    A [D, ..., C] tensor containing the gathered values.

  Raises:
    ValueError: if `coordinate_order` is not 'xyz' or 'zyx'.
  """
  if coordinate_order == 'xyz':
    x_coordinate = locations[Ellipsis, 0]
    y_coordinate = locations[Ellipsis, 1]
    z_coordinate = locations[Ellipsis, 2]
  elif coordinate_order == 'zyx':
    z_coordinate = locations[Ellipsis, 0]
    y_coordinate = locations[Ellipsis, 1]
    x_coordinate = locations[Ellipsis, 2]
  else:
    # Previously an unknown order fell through to an UnboundLocalError.
    raise ValueError(f'Unknown coordinate_order: {coordinate_order}')

  # Use advanced indexing to gather from the volume.
  return data[z_coordinate, y_coordinate, x_coordinate]


def resample_3d(
    data,
    locations,
    edge_behavior='CONSTANT_OUTSIDE',
    constant_values=0.0,
    coordinate_order='xyz',
    method='TRILINEAR',
    half_pixel_center=False,
):
  """Resamples input data at the provided locations from a volume.

  Args:
    data: A [D, H, W, C] tensor from which to sample.
    locations: A [D, ..., 3] containing floating point locations to sample
      data at. Assumes voxels centers at integer coordinates.
    edge_behavior: The behaviour for sample points outside of params.
      -CONSTANT_OUTSIDE: First pads params by 1 with constant_values in the
      x-y-z dimensions, then clamps samples to this padded tensor. The effect
      is that sample points interpolate towards the constant value just
      outside the tensor.
      -CLAMP: clamps to volume.
    constant_values: The constant value to use with edge_behavior
      'CONSTANT_OUTSIDE.'
    coordinate_order: Whether the sample locations are x,y,z or z,y,x.
    method: The interpolation kernel to use, must be 'TRILINEAR' or 'NEAREST'.
    half_pixel_center: A bool that determines if half-pixel centering is used.

  Returns:
    A tensor of shape [D, ..., C] containing the sampled values.

  Raises:
    ValueError: if `method` is not 'TRILINEAR' or 'NEAREST'.
  """

  assert len(data.shape) >= 3
  assert edge_behavior in ['CONSTANT_OUTSIDE', 'CLAMP']
  if edge_behavior == 'CONSTANT_OUTSIDE':
    # Pad each spatial side by one voxel so out-of-range samples interpolate
    # toward `constant_values`, and shift sample coordinates to match.
    data = jnp.pad(
        data,
        np.array([[1, 1], [1, 1], [1, 1]] + (data.ndim - 3) * [[0, 0]]),
        constant_values=constant_values,
    )
    locations = locations + 1.0

  if method == 'TRILINEAR':
    # Trilinearly interpolates by finding the weighted sum of the eight corner
    # points.
    if half_pixel_center:
      locations = locations - 0.5
    floored = jnp.floor(locations)
    ceil = floored + 1.0
    positions = [
        jnp.stack(
            [floored[Ellipsis, 0], floored[Ellipsis, 1], floored[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [floored[Ellipsis, 0], floored[Ellipsis, 1], ceil[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [floored[Ellipsis, 0], ceil[Ellipsis, 1], floored[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [floored[Ellipsis, 0], ceil[Ellipsis, 1], ceil[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [ceil[Ellipsis, 0], floored[Ellipsis, 1], floored[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [ceil[Ellipsis, 0], floored[Ellipsis, 1], ceil[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [ceil[Ellipsis, 0], ceil[Ellipsis, 1], floored[Ellipsis, 2]],
            axis=-1,
        ),
        jnp.stack(
            [ceil[Ellipsis, 0], ceil[Ellipsis, 1], ceil[Ellipsis, 2]],
            axis=-1,
        ),
    ]
    # Each corner's weight is the product of the opposite fractional offsets.
    ceil_w = locations - floored
    floor_w = 1.0 - ceil_w
    weights = [
        floor_w[Ellipsis, 0] * floor_w[Ellipsis, 1] * floor_w[Ellipsis, 2],
        floor_w[Ellipsis, 0] * floor_w[Ellipsis, 1] * ceil_w[Ellipsis, 2],
        floor_w[Ellipsis, 0] * ceil_w[Ellipsis, 1] * floor_w[Ellipsis, 2],
        floor_w[Ellipsis, 0] * ceil_w[Ellipsis, 1] * ceil_w[Ellipsis, 2],
        ceil_w[Ellipsis, 0] * floor_w[Ellipsis, 1] * floor_w[Ellipsis, 2],
        ceil_w[Ellipsis, 0] * floor_w[Ellipsis, 1] * ceil_w[Ellipsis, 2],
        ceil_w[Ellipsis, 0] * ceil_w[Ellipsis, 1] * floor_w[Ellipsis, 2],
        ceil_w[Ellipsis, 0] * ceil_w[Ellipsis, 1] * ceil_w[Ellipsis, 2],
    ]
  elif method == 'NEAREST':
    # Interpolate into the nearest cell. A weight of `None` is treated as 1.
    positions = [(jnp.floor if half_pixel_center else jnp.round)(locations)]
    weights = [None]
  else:
    # Fixed: this message previously lacked the f-prefix and printed the
    # literal text '{method}'.
    raise ValueError(f'interpolation method {method} not supported')

  max_indices = jnp.array(data.shape[:3], dtype=jnp.int32) - 1
  if coordinate_order == 'xyz':
    # Positions are in x,y,z order but the shape is (D, H, W): flip the caps.
    max_indices = jnp.flip(max_indices)

  output = jnp.zeros((*locations.shape[:-1], data.shape[-1]), dtype=data.dtype)

  for position, weight in zip(positions, weights):
    indexes = position.astype(jnp.int32)
    # Clamp to the (possibly padded) volume.
    indexes = jnp.maximum(indexes, 0)
    indexes = jnp.minimum(indexes, max_indices)
    gathered = gather_volume(data, indexes, coordinate_order)
    weighted_gathered = (
        gathered if weight is None else gathered * weight[Ellipsis, None]
    )
    output += weighted_gathered

  return output.astype(data.dtype)
evocodebench_data_238
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Mathy utility functions."""

import functools

import jax
import jax.numpy as jnp
import numpy as np

# float32 extremes used throughout for clamping: smallest positive normal
# value, most-negative finite value, and largest finite value.
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)


def laplace_cdf(x, beta):
  # NOTE(review): this is 1/beta times a sign-flipped variant of the Laplace
  # CDF (it decreases for x > 0) — presumably intentional for its caller;
  # confirm before reusing as a true CDF.
  alpha = 1 / beta
  return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))


def scaled_softplus(x, scale=100.0):
  """Softplus with its input sharpened by `scale` and output scaled back."""
  return (1.0 / scale) * jax.nn.softplus(scale * x)


def matmul(a, b):
  """jnp.matmul defaults to bfloat16, but this helper function doesn't."""
  return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)


def unstack(x, axis=0):
  """Splits `x` along `axis` into a tuple of slices with that axis removed."""
  return tuple(
      jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
  )


@jax.custom_jvp
def plus_eps(x):
  """The next representable float32 above `x`; |x| < tiny maps to +tiny."""
  return jnp.where(
      jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
  )


@jax.custom_jvp
def minus_eps(x):
  """The next representable float32 below `x`; |x| < tiny maps to -tiny."""
  return jnp.where(
      jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
  )


@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
  """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
  return plus_eps(*primals), tangents[0]


@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
  """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
  return minus_eps(*primals), tangents[0]


@jax.custom_jvp
def expm1(x):
  """jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
  return jnp.expm1(x)


@expm1.defjvp
def expm1_jvp(primals, tangents):
  # d/dx expm1(x) = exp(x).
  return expm1(*primals), tangents[0] * jnp.exp(primals[0])


def safe_trig_helper(x, fn, t=100 * jnp.pi):
  """Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
  return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))


def safe_cos(x):
  """jnp.cos() on a TPU may NaN out for large values."""
  return safe_trig_helper(x, jnp.cos)


def safe_sin(x):
  """jnp.sin() on a TPU may NaN out for large values."""
  return safe_trig_helper(x, jnp.sin)


@jax.custom_vjp
def safe_arctan2(x1, x2):
  """arctan2(x1, x2) whose gradient never divides by zero."""
  return safe_arctan2_fwd(x1, x2)[0]


def safe_arctan2_fwd(x1, x2):
  # Forward pass: plain arctan2; stash inputs as residuals for the backward.
  return jnp.arctan2(x1, x2), (x1, x2)


def safe_arctan2_bwd(res, g):
  # d/dx1 = x2/(x1^2+x2^2), d/dx2 = -x1/(...); denominator kept away from 0.
  x1, x2 = res
  denom = remove_zero(x1**2 + x2**2)
  d1 = g * (x2 / denom)
  d2 = g * (-x1 / denom)
  return d1, d2


safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)


def generate_clip_nograd_fn(a_min, a_max):
  """Generates a function that clips to [a_min, a_max] with no grad effects."""

  @jax.custom_jvp
  def clip_nograd(a):
    """Clamps `a` from above and below."""
    return jnp.clip(a, a_min, a_max)

  @clip_nograd.defjvp
  def clip_nograd_jvp(primals, tangents):
    """Override clips()'s gradient to be a no-op."""
    return clip_nograd(primals[0]), tangents[0]

  return clip_nograd


# Straight-through clips to the finite / positive-finite float32 range.
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)


def clip_pos(x):
  """Clamps `x` from below to be positive."""
  return jnp.maximum(tiny_val, x)


def safe_sign(x):
  """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
  return jnp.where(x < 0, -1, +1)


def remove_zero(x):
  """Shifts `x` away from 0."""
  return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)


def clip_finite(x):
  """Clamps `x` into the finite float32 range."""
  return jnp.clip(x, min_val, max_val)


@jax.custom_vjp
def safe_div(n, d):
  """Divide `n` by `d` but the value and gradient never nan out."""
  return safe_div_fwd(n, d)[0]


def safe_div_fwd(n, d):
  # Clip the quotient to finite values and define n/0 as 0.
  r = jnp.clip(n / remove_zero(d), min_val, max_val)
  return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)


def safe_div_bwd(res, g):
  # d(n/d)/dn = 1/d; d(n/d)/dd = -n/d^2 = -r/d. Both clipped to finite.
  d, r = res
  dn = jnp.clip(g / remove_zero(d), min_val, max_val)
  dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
  return dn, dd


safe_div.defvjp(safe_div_fwd, safe_div_bwd)


def generate_safe_fn(fn, grad_fn, x_range):
  """Generates a `safe` fn() where inputs are clipped in fwd and bwd passes."""

  @jax.custom_jvp
  def safe_fn(x):
    """fn() with clipped inputs."""
    return fn(jnp.clip(x, *x_range))

  @safe_fn.defjvp
  def safe_fn_jvp(primals, tangents):
    """Backpropagate using the gradient and clipped inputs."""
    (x,) = primals
    (x_dot,) = tangents
    y = safe_fn(x)
    y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
    return y, y_dot

  return safe_fn


# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.


def safe_log(x):
  """log(x) with inputs clipped to (tiny, max) in forward and backward."""
  return generate_safe_fn(
      jnp.log,
      lambda x, _, x_dot: x_dot / x,
      (tiny_val, max_val),
  )(x)


def safe_exp(x):
  """exp(x) with inputs clipped so the output stays finite."""
  return generate_safe_fn(
      jnp.exp,
      lambda _, y, x_dot: y * x_dot,
      (min_val, np.nextafter(np.log(max_val), np.float32(0))),
  )(x)


def safe_sqrt(x):
  """sqrt(x) with inputs clipped to [0, max] and a guarded gradient."""
  return generate_safe_fn(
      jnp.sqrt,
      lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
      (0, max_val),
  )(x)


def safe_log1p(x):
  """log1p(x) with inputs clipped to stay above -1."""
  return generate_safe_fn(
      jnp.log1p,
      lambda x, _, x_dot: x_dot / (1 + x),
      (np.nextafter(np.float32(-1), np.float32(0)), max_val),
  )(x)


def safe_expm1(x):
  """expm1(x) with inputs clipped so the output stays finite."""
  return generate_safe_fn(
      expm1,  # Note that we wrap around our more accurate expm1.
      lambda x, _, x_dot: jnp.exp(x) * x_dot,
      (min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
  )(x)


def safe_arccos(x):
  """jnp.arccos(x) where x is clipped to [-1, 1]."""
  y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
  return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))


def apply_fn_to_grad(grad_fn):
  """Applies a scalar `grad_fn` function to the gradient of the input."""

  @jax.custom_vjp
  def fn_out(x):
    return x

  fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
  return fn_out


def select(cond_pairs, default):
  """A helpful wrapper around jnp.select() that is easier to read."""
  return jnp.select(*zip(*cond_pairs), default)


def power_ladder_max_output(p):
  """The limit of power_ladder(x, p) as x goes to infinity."""
  return select(
      [
          (p == -jnp.inf, 1),
          (p >= 0, jnp.inf),
      ],
      safe_div(p - 1, p),
  )


def power_ladder(x, p, premult=None, postmult=None):
  """Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
  # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
  if premult is not None:
    x = x * premult
  xp = jnp.abs(x)
  xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
  p_safe = clip_finite_nograd(remove_zero(p))
  # Special-case p in {1, 0, ±inf} where the closed form degenerates.
  y = safe_sign(x) * select(
      [
          (p == 1, xp),
          (p == 0, safe_log1p(xp)),
          (p == -jnp.inf, -safe_expm1(-xp)),
          (p == jnp.inf, safe_expm1(xp)),
      ],
      clip_finite_nograd(
          jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
      ),
  )
  if postmult is not None:
    y = y * postmult
  return y


def inv_power_ladder(y, p, premult=None, postmult=None):
  """The inverse of `power_ladder()`."""
  if postmult is not None:
    y /= postmult
  yp = jnp.abs(y)
  p_safe = clip_finite_nograd(remove_zero(p))
  # Keep |y| strictly inside the invertible output range of power_ladder.
  y_max = minus_eps(power_ladder_max_output(p))
  yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp)  # Clip val, not grad.
  x = safe_sign(y) * select(
      [
          (p == 1, yp),
          (p == 0, safe_expm1(yp)),
          (p == -jnp.inf, -safe_log1p(-yp)),
          (p == jnp.inf, safe_log1p(yp)),
      ],
      jnp.abs(p_safe - 1)
      * (
          ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe)
          - 1
      ),
  )
  if premult is not None:
    x /= premult
  return x


def log_lerp(t, v0, v1):
  """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
  if v0 <= 0 or v1 <= 0:
    raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
  lv0 = jnp.log(v0)
  lv1 = jnp.log(v1)
  return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)


def approx_erf(x):
  """An approximation of erf() that is accurate to within 0.007."""
  return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))


def create_learning_rate_decay(**kwargs):
  """A partial evaluation of learning rate decay that can be used with gin."""
  return functools.partial(learning_rate_decay, **kwargs)


def learning_rate_decay(
    step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
  """Continuous learning rate decay function.

  The returned rate is lr_init when step=0 and lr_final when step=max_steps,
  and is log-linearly interpolated elsewhere (equivalent to exponential decay).
  If lr_delay_steps>0 then the learning rate will be scaled by some smooth
  function of lr_delay_mult, such that the initial learning rate is
  lr_init*lr_delay_mult at the beginning of optimization but will be eased
  back to the normal learning rate when steps>lr_delay_steps.

  Args:
    step: int, the current optimization step.
    lr_init: float, the initial learning rate.
    lr_final: float, the final learning rate.
    max_steps: int, the number of steps during optimization.
    lr_delay_steps: int, the number of steps to delay the full learning rate.
    lr_delay_mult: float, the multiplier on the rate when delaying it.

  Returns:
    lr: the learning for current step 'step'.
  """
  if lr_delay_steps > 0:
    # A kind of reverse cosine decay.
    delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
        0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
    )
  else:
    delay_rate = 1.0
  return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)


def sorted_lookup(x, xp, fps, device_is_tpu):
  """Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
  if not isinstance(fps, tuple):
    raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')

  if device_is_tpu:
    # Identify the location in `xp` that corresponds to each `x`.
    # The final `True` index in `mask` is the start of the matching interval.
    mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]

    def find_interval(x):
      # Grab the value where `mask` switches from True to False, and vice versa.
      # This approach takes advantage of the fact that `x` is sorted.
      x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
      x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
      return x0, x1

    idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
    vals = [find_interval(fp) for fp in fps]
  else:
    # jnp.searchsorted() has slightly different conventions for boundary
    # handling than the rest of this codebase.
    idx = jnp.vectorize(
        lambda a, v: jnp.searchsorted(a, v, side='right'),
        signature='(n),(m)->(m)',
    )(xp, x)
    idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
    idx0 = jnp.maximum(idx - 1, 0)
    vals = []
    for fp in fps:
      fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
      fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
      vals.append((fp0, fp1))
  return (idx0, idx1), vals


def sorted_interp(
    x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
  """A version of interp() where xp and fp must be sorted."""
  (xp0, xp1), (fp0, fp1) = sorted_lookup(
      x, xp, (xp, fp), device_is_tpu=device_is_tpu
  )[1]
  # Linear interpolation within the bracketing interval; the offset is
  # clamped to [0, 1] so out-of-range x clamps to the endpoint values.
  offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
  ret = fp0 + offset * (fp1 - fp0)
  return ret


def searchsorted(a, v, device_is_tpu):
  """Behaves like jnp.searchsorted, excluding boundary conditions."""
  return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]


def override_gradient(fval, bval):
  """Use `fval` in the forward pass but `bval` in the backward pass."""
  # Note that the parentheses are needed to avoid catastrophic cancellation.
  return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))


def average_across_multisamples(x):
  """Function that averages grid query results across the multisample dimension."""
  return jnp.mean(x, axis=-2)


def noop(x):
  """Identity function (useful as a configurable default)."""
  return x


@jax.custom_jvp
def fake_clip(a, a_min, a_max):
  """jnp.clip() but the gradient doesn't get clipped on the backward pass."""
  return jnp.clip(a, a_min, a_max)


@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
  """Override fake_clip()'s gradient so that it's a no-op."""
  return jnp.clip(*primals), tangents[0]


@jax.jit
def general_lossfun(x, alpha, scale):
  r"""This implements the rho(x, \alpha, c) function described in "A General
  and Adaptive Robust Loss Function", Jonathan T. Barron,
  https://arxiv.org/abs/1701.03077.

  Args:
    x: The residual for which the loss is being computed. x can have any shape,
      and alpha and scale will be broadcasted to match x's shape if necessary.
    alpha: The shape parameter of the loss (\alpha in the paper), where more
      negative values produce a loss with more robust behavior (outliers "cost"
      less), and more positive values produce a loss with less robust behavior
      (outliers are penalized more heavily). Alpha can be any value in
      [-infinity, infinity], but the gradient of the loss with respect to alpha
      is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
      interpolation between several discrete robust losses:
        alpha=-Infinity: Welsch/Leclerc Loss.
        alpha=-2: Geman-McClure loss.
        alpha=0: Cauchy/Lortentzian loss.
        alpha=1: Charbonnier/pseudo-Huber loss.
        alpha=2: L2 loss.
    scale: The scale parameter of the loss. When |x| < scale, the loss is an
      L2-like quadratic bowl, and when |x| > scale the loss function takes on a
      different shape according to alpha.

  Returns:
    The losses for each element of x, in the same shape as x.
  """
  eps = jnp.finfo(jnp.float32).eps
  maxval = 1e15

  # A "safe" versions of expm1 that will not NaN-out on large inputs.
  expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))

  # `scale` must be > 0.
  scale = jnp.maximum(eps, scale)

  # Large values of |x| can cause non-finite gradients.
  x = fake_clip(x, -maxval, maxval)

  # The loss when alpha == 2. This will get reused repeatedly.
  loss_two = 0.5 * (x / scale)**2

  # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
  a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
                -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))

  # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
  b = jnp.maximum(eps, jnp.abs(a - 2))

  # The loss when not in one of the special casess.
  loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)

  # Select which of the cases of the loss to return as a function of alpha.
  return jnp.where(
      alpha == -jnp.inf, -expm1_safe(-loss_two),
      jnp.where(
          alpha == 0, jnp.log1p(loss_two),
          jnp.where(alpha == 2, loss_two,
                    jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
                              loss_ow))))
evocodebench_data_239
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
# evocodebench_data_240  (extraction chunk-id marker — artifact, not part of the module source)
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Mathy utility functions."""

import functools

import jax
import jax.numpy as jnp
import numpy as np

# Extreme float32 values, used throughout to keep values and gradients finite.
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)


def laplace_cdf(x, beta):
  """A Laplace-distribution-based transform of `x` with scale `beta`.

  NOTE(review): resembles a Laplace CDF scaled by 1/beta, but the sign
  convention differs from the textbook form — confirm the intended
  parameterization against the caller.
  """
  alpha = 1 / beta
  return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))


def scaled_softplus(x, scale=100.0):
  """Softplus with pre/post scaling: (1/scale) * softplus(scale * x)."""
  return (1.0 / scale) * jax.nn.softplus(scale * x)


def matmul(a, b):
  """jnp.matmul defaults to bfloat16, but this helper function doesn't."""
  return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)


def unstack(x, axis=0):
  """Splits `x` along `axis` into a tuple of arrays with that axis removed."""
  return tuple(
      jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
  )


@jax.custom_jvp
def plus_eps(x):
  """The next float32 strictly greater than `x`, bounded away from zero."""
  return jnp.where(
      jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
  )


@jax.custom_jvp
def minus_eps(x):
  """The next float32 strictly less than `x`, bounded away from zero."""
  return jnp.where(
      jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
  )


@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
  """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
  return plus_eps(*primals), tangents[0]


@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
  """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
  return minus_eps(*primals), tangents[0]


@jax.custom_jvp
def expm1(x):
  """jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
  return jnp.expm1(x)


@expm1.defjvp
def expm1_jvp(primals, tangents):
  # d/dx expm1(x) = exp(x), computed directly instead of via jnp.expm1's JVP.
  return expm1(*primals), tangents[0] * jnp.exp(primals[0])


def safe_trig_helper(x, fn, t=100 * jnp.pi):
  """Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
  return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))


def safe_cos(x):
  """jnp.cos() on a TPU may NaN out for large values."""
  return safe_trig_helper(x, jnp.cos)


def safe_sin(x):
  """jnp.sin() on a TPU may NaN out for large values."""
  return safe_trig_helper(x, jnp.sin)


@jax.custom_vjp
def safe_arctan2(x1, x2):
  """jnp.arctan2() with a gradient that stays finite at (0, 0)."""
  return safe_arctan2_fwd(x1, x2)[0]


def safe_arctan2_fwd(x1, x2):
  return jnp.arctan2(x1, x2), (x1, x2)


def safe_arctan2_bwd(res, g):
  x1, x2 = res
  # remove_zero() keeps the denominator away from 0 so the grads can't NaN.
  denom = remove_zero(x1**2 + x2**2)
  d1 = g * (x2 / denom)
  d2 = g * (-x1 / denom)
  return d1, d2


safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)


def generate_clip_nograd_fn(a_min, a_max):
  """Generates a function that clips to [a_min, a_max] with no grad effects."""

  @jax.custom_jvp
  def clip_nograd(a):
    """Clamps `a` from above and below."""
    return jnp.clip(a, a_min, a_max)

  @clip_nograd.defjvp
  def clip_nograd_jvp(primals, tangents):
    """Override clips()'s gradient to be a no-op."""
    return clip_nograd(primals[0]), tangents[0]

  return clip_nograd


clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)


def clip_pos(x):
  """Clamps `x` from below to be positive."""
  return jnp.maximum(tiny_val, x)


def safe_sign(x):
  """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
  return jnp.where(x < 0, -1, +1)


def remove_zero(x):
  """Shifts `x` away from 0."""
  return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)


def clip_finite(x):
  """Clamps `x` into the finite float32 range [min_val, max_val]."""
  return jnp.clip(x, min_val, max_val)


@jax.custom_vjp
def safe_div(n, d):
  """Divide `n` by `d` but the value and gradient never nan out."""
  return safe_div_fwd(n, d)[0]


def safe_div_fwd(n, d):
  r = jnp.clip(n / remove_zero(d), min_val, max_val)
  # A division by (near-)zero is defined to be exactly 0.
  return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)


def safe_div_bwd(res, g):
  d, r = res
  dn = jnp.clip(g / remove_zero(d), min_val, max_val)
  dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
  return dn, dd


safe_div.defvjp(safe_div_fwd, safe_div_bwd)


def generate_safe_fn(fn, grad_fn, x_range):
  """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""

  @jax.custom_jvp
  def safe_fn(x):
    """fn() with clipped inputs."""
    return fn(jnp.clip(x, *x_range))

  @safe_fn.defjvp
  def safe_fn_jvp(primals, tangents):
    """Backpropagate using the gradient and clipped inputs."""
    (x,) = primals
    (x_dot,) = tangents
    y = safe_fn(x)
    y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
    return y, y_dot

  return safe_fn


# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.


def safe_log(x):
  return generate_safe_fn(
      jnp.log,
      lambda x, _, x_dot: x_dot / x,
      (tiny_val, max_val),
  )(x)


def safe_exp(x):
  return generate_safe_fn(
      jnp.exp,
      lambda _, y, x_dot: y * x_dot,
      (min_val, np.nextafter(np.log(max_val), np.float32(0))),
  )(x)


def safe_sqrt(x):
  return generate_safe_fn(
      jnp.sqrt,
      lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
      (0, max_val),
  )(x)


def safe_log1p(x):
  return generate_safe_fn(
      jnp.log1p,
      lambda x, _, x_dot: x_dot / (1 + x),
      (np.nextafter(np.float32(-1), np.float32(0)), max_val),
  )(x)


def safe_expm1(x):
  return generate_safe_fn(
      expm1,  # Note that we wrap around our more accurate expm1.
      lambda x, _, x_dot: jnp.exp(x) * x_dot,
      (min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
  )(x)


def safe_arccos(x):
  """jnp.arccos(x) where x is clipped to [-1, 1]."""
  y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
  return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))


def apply_fn_to_grad(grad_fn):
  """Applies a scalar `grad_fn` function to the gradient of the input."""

  @jax.custom_vjp
  def fn_out(x):
    return x

  fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
  return fn_out


def select(cond_pairs, default):
  """A helpful wrapper around jnp.select() that is easier to read."""
  return jnp.select(*zip(*cond_pairs), default)


def power_ladder_max_output(p):
  """The limit of power_ladder(x, p) as x goes to infinity."""
  return select(
      [
          (p == -jnp.inf, 1),
          (p >= 0, jnp.inf),
      ],
      safe_div(p - 1, p),
  )


def power_ladder(x, p, premult=None, postmult=None):
  """Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
  # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
  if premult is not None:
    x = x * premult
  xp = jnp.abs(x)
  xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
  p_safe = clip_finite_nograd(remove_zero(p))
  y = safe_sign(x) * select(
      [
          (p == 1, xp),
          (p == 0, safe_log1p(xp)),
          (p == -jnp.inf, -safe_expm1(-xp)),
          (p == jnp.inf, safe_expm1(xp)),
      ],
      clip_finite_nograd(
          jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
      ),
  )
  if postmult is not None:
    y = y * postmult
  return y


def inv_power_ladder(y, p, premult=None, postmult=None):
  """The inverse of `power_ladder()`."""
  if postmult is not None:
    y /= postmult
  yp = jnp.abs(y)
  p_safe = clip_finite_nograd(remove_zero(p))
  y_max = minus_eps(power_ladder_max_output(p))
  yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp)  # Clip val, not grad.
  x = safe_sign(y) * select(
      [
          (p == 1, yp),
          (p == 0, safe_expm1(yp)),
          (p == -jnp.inf, -safe_log1p(-yp)),
          (p == jnp.inf, safe_log1p(yp)),
      ],
      jnp.abs(p_safe - 1)
      * (
          ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe)
          - 1
      ),
  )
  if premult is not None:
    x /= premult
  return x


def log_lerp(t, v0, v1):
  """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
  if v0 <= 0 or v1 <= 0:
    raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
  lv0 = jnp.log(v0)
  lv1 = jnp.log(v1)
  return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)


def approx_erf(x):
  """An approximation of erf() that is accurate to within 0.007."""
  return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))


def create_learning_rate_decay(**kwargs):
  """A partial evaluation of learning rate decay that can be used with gin."""
  return functools.partial(learning_rate_decay, **kwargs)


def learning_rate_decay(
    step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
  """Continuous learning rate decay function.

  The returned rate is lr_init when step=0 and lr_final when step=max_steps,
  and is log-linearly interpolated elsewhere (equivalent to exponential decay).
  If lr_delay_steps>0 then the learning rate will be scaled by some smooth
  function of lr_delay_mult, such that the initial learning rate is
  lr_init*lr_delay_mult at the beginning of optimization but will be eased back
  to the normal learning rate when steps>lr_delay_steps.

  Args:
    step: int, the current optimization step.
    lr_init: float, the initial learning rate.
    lr_final: float, the final learning rate.
    max_steps: int, the number of steps during optimization.
    lr_delay_steps: int, the number of steps to delay the full learning rate.
    lr_delay_mult: float, the multiplier on the rate when delaying it.

  Returns:
    lr: the learning for current step 'step'.
  """
  if lr_delay_steps > 0:
    # A kind of reverse cosine decay.
    delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
        0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
    )
  else:
    delay_rate = 1.0
  return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)


def sorted_lookup(x, xp, fps, device_is_tpu):
  """Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
  if not isinstance(fps, tuple):
    raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')

  if device_is_tpu:
    # Identify the location in `xp` that corresponds to each `x`.
    # The final `True` index in `mask` is the start of the matching interval.
    mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]

    def find_interval(x):
      # Grab the value where `mask` switches from True to False, and vice versa.
      # This approach takes advantage of the fact that `x` is sorted.
      x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
      x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
      return x0, x1

    idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
    vals = [find_interval(fp) for fp in fps]
  else:
    # jnp.searchsorted() has slightly different conventions for boundary
    # handling than the rest of this codebase.
    idx = jnp.vectorize(
        lambda a, v: jnp.searchsorted(a, v, side='right'),
        signature='(n),(m)->(m)',
    )(xp, x)
    idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
    idx0 = jnp.maximum(idx - 1, 0)
    vals = []
    for fp in fps:
      fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
      fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
      vals.append((fp0, fp1))
  return (idx0, idx1), vals


def sorted_interp(
    x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
  """A version of interp() where xp and fp must be sorted."""
  (xp0, xp1), (fp0, fp1) = sorted_lookup(
      x, xp, (xp, fp), device_is_tpu=device_is_tpu
  )[1]
  offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
  ret = fp0 + offset * (fp1 - fp0)
  return ret


def searchsorted(a, v, device_is_tpu):
  """Behaves like jnp.searchsorted, excluding boundary conditions."""
  return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]


def override_gradient(fval, bval):
  """Use `fval` in the forward pass but `bval` in the backward pass."""
  # Note that the parentheses are needed to avoid catastrophic cancellation.
  return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))


def average_across_multisamples(x):
  """Function that averages grid query results across the multisample dimension."""
  return jnp.mean(x, axis=-2)


def noop(x):
  return x


@jax.custom_jvp
def fake_clip(a, a_min, a_max):
  """jnp.clip() but the gradient doesn't get clipped on the backward pass."""
  return jnp.clip(a, a_min, a_max)


@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
  """Override fake_clip()'s gradient so that it's a no-op."""
  return jnp.clip(*primals), tangents[0]


@jax.jit
def general_lossfun(x, alpha, scale):
  r"""This implements the rho(x, \alpha, c) function described in "A General and
  Adaptive Robust Loss Function", Jonathan T. Barron,
  https://arxiv.org/abs/1701.03077.

  Args:
    x: The residual for which the loss is being computed. x can have any shape,
      and alpha and scale will be broadcasted to match x's shape if necessary.
    alpha: The shape parameter of the loss (\alpha in the paper), where more
      negative values produce a loss with more robust behavior (outliers "cost"
      less), and more positive values produce a loss with less robust behavior
      (outliers are penalized more heavily). Alpha can be any value in
      [-infinity, infinity], but the gradient of the loss with respect to alpha
      is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
      interpolation between several discrete robust losses:
        alpha=-Infinity: Welsch/Leclerc Loss.
        alpha=-2: Geman-McClure loss.
        alpha=0: Cauchy/Lortentzian loss.
        alpha=1: Charbonnier/pseudo-Huber loss.
        alpha=2: L2 loss.
    scale: The scale parameter of the loss. When |x| < scale, the loss is an
      L2-like quadratic bowl, and when |x| > scale the loss function takes on a
      different shape according to alpha.

  Returns:
    The losses for each element of x, in the same shape as x.
  """
  eps = jnp.finfo(jnp.float32).eps
  maxval = 1e15

  # A "safe" versions of expm1 that will not NaN-out on large inputs.
  expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))

  # `scale` must be > 0.
  scale = jnp.maximum(eps, scale)

  # Large values of |x| can cause non-finite gradients.
  x = fake_clip(x, -maxval, maxval)

  # The loss when alpha == 2. This will get reused repeatedly.
  loss_two = 0.5 * (x / scale)**2

  # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
  a = jnp.where(
      alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)
  ) * jnp.maximum(eps, jnp.abs(alpha))

  # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
  b = jnp.maximum(eps, jnp.abs(a - 2))

  # The loss when not in one of the special casess.
  loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)

  # Select which of the cases of the loss to return as a function of alpha.
  return jnp.where(
      alpha == -jnp.inf,
      -expm1_safe(-loss_two),
      jnp.where(
          alpha == 0,
          jnp.log1p(loss_two),
          jnp.where(
              alpha == 2,
              loss_two,
              jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow),
          ),
      ),
  )
# evocodebench_data_241  (extraction chunk-id marker — artifact, not part of the module source)
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_242
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_243
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_244
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. 
Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis
evocodebench_data_245
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_246
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_247
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_248
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x 
<< 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) 
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. 
x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. 
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. 
idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. 
alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow))))
evocodebench_data_249
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. 
""" origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # 
Dummy Rays object that can be used to initialize NeRF model. def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): 
"""Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator
evocodebench_data_250
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Camera pose and ray generation utility functions."""

import enum
import functools
import types
from typing import Final, List, Mapping, Optional, Text, Tuple, TypeAlias

from absl import logging
import chex
from internal import configs
from internal import geometry
from internal import math
from internal import rigid_body
from internal import spin_math
from internal import stepfun
from internal import utils
import jax
from jax import random
import jax.numpy as jnp
import jaxcam
import numpy as np
import scipy


# Most functions here accept either numpy or jax.numpy arrays (selected via an
# `xnp` argument), so both array types are admitted in the alias.
_Array: TypeAlias = np.ndarray | jnp.ndarray
_ScalarArray: TypeAlias = float | _Array

# If more than this fraction of a requested file subset is missing from the
# posed image list, identify_file_indices() raises instead of warning.
_IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD: Final[float] = 0.95


def convert_to_ndc(
    origins,
    directions,
    pixtocam,
    near = 1.0,
    xnp = np,
):
  """Converts a set of rays to normalized device coordinates (NDC).

  Args:
    origins: ndarray(float32), [..., 3], world space ray origins.
    directions: ndarray(float32), [..., 3], world space ray directions.
    pixtocam: ndarray(float32), [3, 3], inverse intrinsic matrix.
    near: float, near plane along the negative z axis.
    xnp: either numpy or jax.numpy.

  Returns:
    origins_ndc: ndarray(float32), [..., 3].
    directions_ndc: ndarray(float32), [..., 3].

  This function assumes input rays should be mapped into the NDC space for a
  perspective projection pinhole camera, with identity extrinsic matrix (pose)
  and intrinsic parameters defined by inputs focal, width, and height.

  The near value specifies the near plane of the frustum, and the far plane is
  assumed to be infinity.

  The ray bundle for the identity pose camera will be remapped to parallel rays
  within the (-1, -1, -1) to (1, 1, 1) cube. Any other ray in the original
  world space can be remapped as long as it has dz < 0 (ray direction has a
  negative z-coord); this allows us to share a common NDC space for "forward
  facing" scenes.

  Note that projection(origins + t * directions) will NOT be equal to
  origins_ndc + t * directions_ndc and that the directions_ndc are not unit
  length. Rather, directions_ndc is defined such that the valid near and far
  planes in NDC will be 0 and 1.

  See Appendix C in https://arxiv.org/abs/2003.08934 for additional details.
  """

  # Shift ray origins to near plane, such that oz = -near.
  # This makes the new near bound equal to 0.
  t = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  origins = origins + t[Ellipsis, None] * directions

  dx, dy, dz = xnp.moveaxis(directions, -1, 0)
  ox, oy, oz = xnp.moveaxis(origins, -1, 0)

  xmult = 1.0 / pixtocam[0, 2]  # Equal to -2. * focal / cx
  ymult = 1.0 / pixtocam[1, 2]  # Equal to -2. * focal / cy

  # Perspective projection into NDC for the t = 0 near points
  #     origins + 0 * directions
  origins_ndc = xnp.stack(
      [xmult * ox / oz, ymult * oy / oz, -xnp.ones_like(oz)], axis=-1
  )

  # Perspective projection into NDC for the t = infinity far points
  #     origins + infinity * directions
  infinity_ndc = xnp.stack(
      [xmult * dx / dz, ymult * dy / dz, xnp.ones_like(oz)], axis=-1
  )

  # directions_ndc points from origins_ndc to infinity_ndc
  directions_ndc = infinity_ndc - origins_ndc

  return origins_ndc, directions_ndc


def pad_poses(p):
  """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1]."""
  bottom = np.broadcast_to([0, 0, 0, 1.0], p[Ellipsis, :1, :4].shape)
  return np.concatenate([p[Ellipsis, :3, :4], bottom], axis=-2)


def unpad_poses(p):
  """Remove the homogeneous bottom row from [..., 4, 4] pose matrices."""
  return p[Ellipsis, :3, :4]


def recenter_poses(poses):
  """Recenter poses around the origin."""
  # The average input pose is mapped to the identity; returns the transformed
  # poses together with the applied 4x4 transform.
  cam2world = average_pose(poses)
  transform = np.linalg.inv(pad_poses(cam2world))
  poses = transform @ pad_poses(poses)
  return unpad_poses(poses), transform


def average_pose(poses, lock_up = False):
  """New pose using average position, z-axis, and up vector of input poses."""
  position = poses[:, :3, 3].mean(0)
  z_axis = poses[:, :3, 2].mean(0)
  up = poses[:, :3, 1].mean(0)
  cam2world = viewmatrix(z_axis, up, position, lock_up=lock_up)
  return cam2world


def viewmatrix(
    lookdir,
    up,
    position,
    lock_up = False,
):
  """Construct lookat view matrix.

  Args:
    lookdir: [3], camera forward (z) direction.
    up: [3], approximate up vector.
    position: [3], camera position.
    lock_up: if True, keep `up` exact and let the look direction bend;
      otherwise (the default) keep `lookdir` exact and let `up` bend.

  Returns:
    [3, 4] camera-to-world matrix with columns (x, y, z, position).
  """
  orthogonal_dir = lambda a, b: normalize(np.cross(a, b))
  vecs = [None, normalize(up), normalize(lookdir)]
  # x-axis is always the normalized cross product of `lookdir` and `up`.
  vecs[0] = orthogonal_dir(vecs[1], vecs[2])
  # Default is to lock `lookdir` vector, if lock_up is True lock `up` instead.
  ax = 2 if lock_up else 1
  # Set the not-locked axis to be orthogonal to the other two.
  vecs[ax] = orthogonal_dir(vecs[(ax + 1) % 3], vecs[(ax + 2) % 3])
  m = np.stack(vecs + [position], axis=1)
  return m


def rotation_about_axis(degrees, axis=0):
  """Creates rotation matrix about one of the coordinate axes.

  Args:
    degrees: rotation angle in degrees.
    axis: which coordinate axis (0, 1, or 2) to rotate about; implemented by
      rolling a base rotation into position with np.roll.

  Returns:
    [4, 4] homogeneous rotation matrix.
  """
  radians = degrees / 180.0 * np.pi
  rot2x2 = np.array(
      [[np.cos(radians), -np.sin(radians)], [np.sin(radians), np.cos(radians)]]
  )
  r = np.eye(3)
  r[1:3, 1:3] = rot2x2
  r = np.roll(np.roll(r, axis, axis=0), axis, axis=1)
  p = np.eye(4)
  p[:3, :3] = r
  return p


def normalize(x):
  """Normalization helper function."""
  return x / np.linalg.norm(x)


def focus_point_fn(poses, xnp = np):
  """Calculate nearest point to all focal axes in poses."""
  # Least-squares solve for the 3D point minimizing summed squared distance to
  # every camera's optical (z) axis.
  directions, origins = poses[:, :3, 2:3], poses[:, :3, 3:4]
  m = xnp.eye(3) - directions * xnp.transpose(directions, [0, 2, 1])
  mt_m = xnp.transpose(m, [0, 2, 1]) @ m
  focus_pt = xnp.linalg.inv(mt_m.mean(0)) @ (mt_m @ origins).mean(0)[:, 0]
  return focus_pt


# Constants for generate_spiral_path():
NEAR_STRETCH = 0.9  # Push forward near bound for forward facing render path.
FAR_STRETCH = 5.0  # Push back far bound for forward facing render path.
FOCUS_DISTANCE = 0.75  # Relative weighting of near, far bounds for render path.


def generate_spiral_path(
    poses,
    bounds,
    n_frames = 120,
    n_rots = 2,
    zrate = 0.5,
):
  """Calculates a forward facing spiral path for rendering."""
  # Find a reasonable 'focus depth' for this dataset as a weighted average
  # of conservative near and far bounds in disparity space.
  near_bound = bounds.min() * NEAR_STRETCH
  far_bound = bounds.max() * FAR_STRETCH
  # All cameras will point towards the world space point (0, 0, -focal).
  focal = 1 / (((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound))

  # Get radii for spiral path using 90th percentile of camera positions.
  positions = poses[:, :3, 3]
  radii = np.percentile(np.abs(positions), 90, 0)
  radii = np.concatenate([radii, [1.0]])

  # Generate poses for spiral path.
  render_poses = []
  cam2world = average_pose(poses)
  up = poses[:, :3, 1].mean(0)
  for theta in np.linspace(0.0, 2.0 * np.pi * n_rots, n_frames, endpoint=False):
    t = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]
    position = cam2world @ t
    lookat = cam2world @ [0, 0, -focal, 1.0]
    z_axis = position - lookat
    render_poses.append(viewmatrix(z_axis, up, position))
  render_poses = np.stack(render_poses, axis=0)
  return render_poses


def transform_poses_pca(poses):
  """Transforms poses so principal components lie on XYZ axes.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world
      transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  t = poses[:, :3, 3]
  t_mean = t.mean(axis=0)
  t = t - t_mean

  eigval, eigvec = np.linalg.eig(t.T @ t)
  # Sort eigenvectors in order of largest to smallest eigenvalue.
  inds = np.argsort(eigval)[::-1]
  eigvec = eigvec[:, inds]
  rot = eigvec.T
  # Ensure the rotation is proper (det == +1), not a reflection.
  if np.linalg.det(rot) < 0:
    rot = np.diag(np.array([1, 1, -1])) @ rot

  transform = np.concatenate([rot, rot @ -t_mean[:, None]], -1)
  poses_recentered = unpad_poses(transform @ pad_poses(poses))
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)

  # Flip coordinate system if z component of y-axis is negative
  if poses_recentered.mean(axis=0)[2, 1] < 0:
    poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered
    transform = np.diag(np.array([1, -1, -1, 1])) @ transform

  # Just make sure it sits in the [-1, 1]^3 cube
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform

  return poses_recentered, transform


def transform_poses_focus(poses):
  """Transforms poses so that the "focus point" of capture is at the origin.

  Args:
    poses: a (N, 3, 4) array containing the cameras' camera to world
      transforms.

  Returns:
    A tuple (poses, transform), with the transformed poses and the applied
    camera_to_world transforms.
  """
  # Move the focus point to the origin.
  focus_point = focus_point_fn(poses)

  # Use average up vector as the Z axis.
  swap_y_z = np.array([
      [1, 0, 0],
      [0, 0, 1],
      [0, -1, 0.0],
  ])
  rot = average_pose(poses, lock_up=True)[:3, :3] @ swap_y_z
  transform = np.concatenate([rot.T, rot.T @ -focus_point[:, None]], -1)

  poses_recentered = transform @ pad_poses(poses)
  transform = np.concatenate([transform, np.eye(4)[3:]], axis=0)

  # Just make sure it sits in the [-1, 1]^3 cube
  scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3]))
  poses_recentered[:, :3, 3] *= scale_factor
  transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform
  return poses_recentered, transform


def generate_ellipse_path(
    poses,
    n_frames = 120,
    const_speed = True,
    z_variation = 0.0,
    z_phase = 0.0,
    rad_mult_min = 1.0,
    rad_mult_max = 1.0,
    render_rotate_xaxis = 0.0,
    render_rotate_yaxis = 0.0,
    use_avg_z_height = False,
    z_height_percentile = None,
    lock_up = False,
):
  """Generate an elliptical render path based on the given poses."""
  # Calculate the focal point for the path (cameras point toward this).
  center = focus_point_fn(poses)
  # Default path height sits at z=0 (in middle of zero-mean capture pattern).
  xy_offset = center[:2]

  # Calculate lengths for ellipse axes based on input camera positions.
  xy_radii = np.percentile(np.abs(poses[:, :2, 3] - xy_offset), 90, axis=0)
  # Use ellipse that is symmetric about the focal point in xy.
  xy_low = xy_offset - xy_radii
  xy_high = xy_offset + xy_radii

  # Optional height variation, need not be symmetric.
  z_min = np.percentile((poses[:, 2, 3]), 10, axis=0)
  z_max = np.percentile((poses[:, 2, 3]), 90, axis=0)
  if use_avg_z_height or z_height_percentile is not None:
    # Center the path vertically around the average camera height, good for
    # datasets recentered by transform_poses_focus function.
    if z_height_percentile is None:
      z_init = poses[:, 2, 3].mean(axis=0)
    else:
      z_init = np.percentile(poses[:, 2, 3], z_height_percentile, axis=0)
  else:
    # Center the path at zero, good for datasets recentered by
    # transform_poses_pca function.
    z_init = 0
  z_low = z_init + z_variation * (z_min - z_init)
  z_high = z_init + z_variation * (z_max - z_init)

  xyz_low = np.array([*xy_low, z_low])
  xyz_high = np.array([*xy_high, z_high])

  def get_positions(theta):
    # Interpolate between bounds with trig functions to get ellipse in x-y.
    # Optionally also interpolate in z to change camera height along path.
    t_x = np.cos(theta) * 0.5 + 0.5
    t_y = np.sin(theta) * 0.5 + 0.5
    t_z = np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5
    t_xyz = np.stack([t_x, t_y, t_z], axis=-1)
    positions = xyz_low + t_xyz * (xyz_high - xyz_low)
    # Interpolate between min and max radius multipliers so the camera zooms in
    # and out of the scene center.
    t = np.sin(theta) * 0.5 + 0.5
    rad_mult = rad_mult_min + (rad_mult_max - rad_mult_min) * t
    positions = center + (positions - center) * rad_mult[:, None]
    return positions

  theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
  positions = get_positions(theta)

  if const_speed:
    # Resample theta angles so that the velocity is closer to constant.
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
    positions = get_positions(theta)

  # Throw away duplicated last position.
  positions = positions[:-1]

  # Set path's up vector to axis closest to average of input pose up vectors.
  avg_up = poses[:, :3, 1].mean(0)
  avg_up = avg_up / np.linalg.norm(avg_up)
  ind_up = np.argmax(np.abs(avg_up))
  up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])

  poses = np.stack([viewmatrix(p - center, up, p, lock_up) for p in positions])

  # Optional extra global rotations of the whole path.
  poses = poses @ rotation_about_axis(-render_rotate_yaxis, axis=1)
  poses = poses @ rotation_about_axis(render_rotate_xaxis, axis=0)
  return poses


def generate_interpolated_path(
    poses,
    n_interp,
    spline_degree = 5,
    smoothness = 0.03,
    rot_weight = 0.1,
    lock_up = False,
    fixed_up_vector = None,
    lookahead_i = None,
    frames_per_colmap = None,
    const_speed = False,
    n_buffer = None,
    periodic = False,
    n_interp_as_total = False,
):
  """Creates a smooth spline path between input keyframe camera poses.

  Spline is calculated with poses in format (position, lookat-point, up-point).

  Args:
    poses: (n, 3, 4) array of input pose keyframes.
    n_interp: returned path will have n_interp * (n - 1) total poses.
    spline_degree: polynomial degree of B-spline.
    smoothness: parameter for spline smoothing, 0 forces exact interpolation.
    rot_weight: relative weighting of rotation/translation in spline solve.
    lock_up: if True, forced to use given Up and allow Lookat to vary.
    fixed_up_vector: replace the interpolated `up` with a fixed vector.
    lookahead_i: force the look direction to look at the pose `i` frames ahead.
    frames_per_colmap: conversion factor for the desired average velocity.
    const_speed: renormalize spline to have constant delta between each pose.
    n_buffer: Number of buffer frames to insert at the start and end of the
      path. Helps keep the ends of a spline path straight.
    periodic: make the spline path periodic (perfect loop).
    n_interp_as_total: use n_interp as total number of poses in path rather
      than the number of poses to interpolate between each input.

  Returns:
    A tuple (poses, u, u_keyframes): `poses` is an array of new camera poses
    with shape (n_interp * (n - 1), 3, 4), or (n_interp, 3, 4) if
    n_interp_as_total is set; `u` holds each output pose's spline parameter;
    `u_keyframes` holds the spline parameters of the input keyframes.
  """

  def poses_to_points(poses, dist):
    """Converts from pose matrices to (position, lookat, up) format."""
    pos = poses[:, :3, -1]
    lookat = poses[:, :3, -1] - dist * poses[:, :3, 2]
    up = poses[:, :3, -1] + dist * poses[:, :3, 1]
    return np.stack([pos, lookat, up], 1)

  def points_to_poses(points):
    """Converts from (position, lookat, up) format to pose matrices."""
    poses = []
    for i in range(len(points)):
      pos, lookat_point, up_point = points[i]
      if lookahead_i is not None:
        if i + lookahead_i < len(points):
          lookat = pos - points[i + lookahead_i][0]
      else:
        lookat = pos - lookat_point
      # NOTE(review): in lookahead mode the final `lookahead_i` frames reuse
      # the previous iteration's `lookat` value — confirm this is intended.
      up = (up_point - pos) if fixed_up_vector is None else fixed_up_vector
      poses.append(viewmatrix(lookat, up, pos, lock_up=lock_up))
    return np.array(poses)

  def insert_buffer_poses(poses, n_buffer):
    """Insert extra poses at the start and end of the path."""

    def average_distance(points):
      distances = np.linalg.norm(points[1:] - points[0:-1], axis=-1)
      return np.mean(distances)

    def shift(pose, dz):
      """Return a copy of `pose` translated by `dz` along its own z axis."""
      result = np.copy(pose)
      z = result[:3, 2]
      z /= np.linalg.norm(z)
      # Move along forward-backward axis. -z is forward.
      result[:3, 3] += z * dz
      return result

    dz = average_distance(poses[:, :3, 3])
    prefix = np.stack([shift(poses[0], (i + 1) * dz) for i in range(n_buffer)])
    prefix = prefix[::-1]  # reverse order
    suffix = np.stack(
        [shift(poses[-1], -(i + 1) * dz) for i in range(n_buffer)]
    )
    result = np.concatenate([prefix, poses, suffix])
    return result

  def remove_buffer_poses(poses, u, n_frames, u_keyframes, n_buffer):
    """Drop interpolated frames belonging to the inserted buffer keyframes."""
    u_keyframes = u_keyframes[n_buffer:-n_buffer]
    mask = (u >= u_keyframes[0]) & (u <= u_keyframes[-1])
    poses = poses[mask]
    u = u[mask]
    n_frames = len(poses)
    return poses, u, n_frames, u_keyframes

  def interp(points, u, k, s):
    """Runs multidimensional B-spline interpolation on the input points."""
    sh = points.shape
    pts = np.reshape(points, (sh[0], -1))
    k = min(k, sh[0] - 1)
    tck, u_keyframes = scipy.interpolate.splprep(pts.T, k=k, s=s, per=periodic)
    new_points = np.array(scipy.interpolate.splev(u, tck))
    new_points = np.reshape(new_points.T, (len(u), sh[1], sh[2]))
    return new_points, u_keyframes

  if n_buffer is not None:
    poses = insert_buffer_poses(poses, n_buffer)
  points = poses_to_points(poses, dist=rot_weight)
  if n_interp_as_total:
    n_frames = n_interp + 1  # Add extra since final pose is discarded.
  else:
    n_frames = n_interp * (points.shape[0] - 1)
  u = np.linspace(0, 1, n_frames, endpoint=True)
  new_points, u_keyframes = interp(points, u=u, k=spline_degree, s=smoothness)
  poses = points_to_poses(new_points)
  if n_buffer is not None:
    poses, u, n_frames, u_keyframes = remove_buffer_poses(
        poses, u, n_frames, u_keyframes, n_buffer
    )

  if frames_per_colmap is not None:
    # Recalculate the number of frames to achieve desired average velocity.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    total_length_colmap = lengths.sum()
    print('old n_frames:', n_frames)
    print('total_length_colmap:', total_length_colmap)
    n_frames = int(total_length_colmap * frames_per_colmap)
    print('new n_frames:', n_frames)
    u = np.linspace(
        np.min(u_keyframes), np.max(u_keyframes), n_frames, endpoint=True
    )
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)

  if const_speed:
    # Resample timesteps so that the velocity is nearly constant.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    u = stepfun.sample(None, u, np.log(lengths), n_frames + 1)
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)

  # Drop the final (duplicated) pose.
  return poses[:-1], u[:-1], u_keyframes


def safe_interpolate_1d(
    x,
    spline_degree,
    smoothness,
    t_input,
    t_output,
):
  """Interpolate 1d signal x (defined at t_input and queried at t_output)."""
  # TODO(bmild): switch interpolation t values to match those chosen for path.
  # One needs at least n=k+1 points to fit a polynomial of degree k to n points.
  n = len(x)
  spline_degree = min(spline_degree, n - 1)

  if spline_degree > 0:
    tck = scipy.interpolate.splrep(t_input, x, s=smoothness, k=spline_degree)
    return scipy.interpolate.splev(t_output, tck).astype(x.dtype)
  else:  # n = 0 or 1
    fill_value = x[0] if n else 0.0
    return np.full(t_output.shape, fill_value, dtype=x.dtype)


def identify_file_names(dir_or_text_file):
  """Load filenames from text file or directory."""
  if utils.isdir(dir_or_text_file):
    # If `dir_or_text_file` is a directory, grab the filenames.
    subset_names = sorted(utils.listdir(dir_or_text_file))
  else:
    # If `dir_or_text_file` is a text file, treat each line as a filename.
    with utils.open_file(dir_or_text_file, 'r') as fp:
      names = fp.read()
      if isinstance(names, bytes):
        names = names.decode('utf-8')
    # Decode bytes into string and split into lines.
    subset_names = names.splitlines()
  return subset_names


def identify_file_indices(
    dir_or_text_file, file_names
):
  """Computes indices for a subset of files out of a larger list."""
  # Load file names.
  subset_names = identify_file_names(dir_or_text_file)

  # COLMAP sometimes doesn't reconstruct all images, which results in some files
  # being missing.
  if not set(subset_names).issubset(file_names):
    subset_names_missing_from_file_names = set(subset_names) - set(file_names)
    logging.warning(
        'Some files from subset are missing in the file names:\n%s',
        ' '.join(str(x) for x in subset_names_missing_from_file_names),
    )
    missing_subset_names_threshold = len(
        subset_names_missing_from_file_names
    ) / len(subset_names)
    # Tolerate a few missing files, but raise if almost all of them are gone.
    if (
        missing_subset_names_threshold
        > _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
    ):
      raise ValueError(
          f'{missing_subset_names_threshold*100}% of subset files is missing'
          f' from file_names: {subset_names_missing_from_file_names}'
      )

  file_names_set = set(file_names)

  # Get indices corresponding to the subset filenames. Ensure that the order
  # used in subset_names is preserved.
  indices = [file_names.index(n) for n in subset_names if n in file_names_set]
  indices = np.array(indices)
  return indices


def get_meters_per_colmap_from_calibration_images(
    config, poses, image_names
):
  """Uses calibration images to get how many meters is a single COLMAP unit."""
  # By default, the input camera poses are scaled to fit in the [-1, 1]^3 cube.
  # This default value implies a scaling of 2 / .25 = 8 meters between the
  # farthest apart camera poses.
  meters_per_colmap = 8.0
  if config.render_calibration_keyframes is not None:
    # Use provided calibration keyframes to determine metric world scale.
    calib_names = identify_file_names(config.render_calibration_keyframes)
    indices = []
    for i in range(0, len(calib_names), 2):
      # Grab pairs of calibration images filenames.
      name0, name1 = calib_names[i : i + 2]
      # Check if both are in the set of colmap-posed images.
      if name0 in image_names and name1 in image_names:
        indices.append((image_names.index(name0), image_names.index(name1)))
    if indices:
      # Extract colmap-space positions from the camera pose matrices.
      positions = poses[indices][Ellipsis, :3, -1]
      # Every pair of calibration keyframes should have world space distance
      # `render_calibration_distance` according to the capture handbook.
      colmap_lengths = np.linalg.norm(
          positions[:, 0] - positions[:, 1], axis=-1
      )
      colmap_length = colmap_lengths.mean(axis=0)
      # Ratio of world distance to colmap distance.
      meters_per_colmap = config.render_calibration_distance / colmap_length
      print('colmap lengths', colmap_lengths)
      print('avg', colmap_length)
      print('meters_per_colmap', meters_per_colmap)

  return meters_per_colmap


def calibrate_spline_speed(
    config, poses, image_names
):
  """Uses input config to determine a conversion factor for the spline speed."""
  if config.render_spline_meters_per_sec is None:
    return None

  meters_per_colmap = get_meters_per_colmap_from_calibration_images(
      config, poses, image_names
  )
  meters_per_sec = config.render_spline_meters_per_sec
  frames_per_sec = config.render_video_fps
  # (colmap units / meter) * (meters / sec) * (frames / sec) -> frames/colmap.
  frames_per_colmap = meters_per_colmap / meters_per_sec * frames_per_sec
  print('returning frames_per_colmap', frames_per_colmap)

  return frames_per_colmap


def create_render_spline_path(
    config,
    image_names,
    poses,
    exposures,
):
  """Creates spline interpolation render path from subset of dataset poses.

  Args:
    config: configs.Config object.
    image_names: a list of image filenames.
    poses: [N, 3, 4] array of extrinsic camera pose matrices.
    exposures: optional list of floating point exposure values.

  Returns:
    spline_indices: list of indices used to select spline keyframe poses.
    render_poses: array of interpolated extrinsic camera poses for the path.
    render_exposures: optional list of interpolated exposures for the path.
  """

  def remove_outlier_spline_indices(
      spline_indices, poses, q_max, q_mult
  ):
    """Identify spline indices that correspond to inlier poses."""
    poses = poses[spline_indices]
    points = poses[:, :3, -1]
    distances = np.linalg.norm(points[1:] - points[:-1], axis=-1)
    mask = distances < q_mult * np.quantile(distances, q_max)
    mask = np.concatenate([mask, [True]], axis=0)  # Keep the last pose.
    num_inliers = int(np.sum(mask))
    num_total = len(spline_indices)
    print(
        f'remove_outlier_spline_indices: {num_inliers}/{num_total} spline '
        'path poses remaining after outlier removal.'
    )
    return spline_indices[mask]

  # Grab poses corresponding to the image filenames.
  spline_indices = identify_file_indices(
      config.render_spline_keyframes, image_names
  )
  if (
      config.render_spline_outlier_keyframe_quantile is not None
      and config.render_spline_outlier_keyframe_multiplier is not None
  ):
    spline_indices = remove_outlier_spline_indices(
        spline_indices,
        poses,
        q_max=config.render_spline_outlier_keyframe_quantile,
        q_mult=config.render_spline_outlier_keyframe_multiplier,
    )
  keyframes = poses[spline_indices]
  frames_per_colmap = calibrate_spline_speed(config, poses, image_names)

  if config.render_spline_fixed_up:
    # Fix path to use world-space "up" vector instead of "banking" with spline.
    all_up_vectors = poses[:, :3, 1]  # second column of pose matrix is up.
    fixed_up_vector = normalize(all_up_vectors.mean(axis=0))
  else:
    fixed_up_vector = None
  render_poses, frame_timesteps, keyframe_timesteps = (
      generate_interpolated_path(
          keyframes,
          n_interp=config.render_spline_n_interp,
          spline_degree=config.render_spline_degree,
          smoothness=config.render_spline_smoothness,
          rot_weight=config.render_spline_rot_weight,
          lock_up=config.render_spline_lock_up,
          fixed_up_vector=fixed_up_vector,
          lookahead_i=config.render_spline_lookahead_i,
          frames_per_colmap=frames_per_colmap,
          const_speed=config.render_spline_const_speed,
          n_buffer=config.render_spline_n_buffer,
      )
  )
  if config.render_spline_interpolate_exposure:
    if exposures is None:
      raise ValueError(
          'config.render_spline_interpolate_exposure is True but '
          'create_render_spline_path() was passed exposures=None.'
      )
    # Interpolate per-frame exposure value.
    log_exposure = np.log(exposures[spline_indices])
    # Use aggressive smoothing for exposure interpolation to avoid flickering.
    log_exposure_interp = safe_interpolate_1d(
        log_exposure,
        spline_degree=5,
        smoothness=config.render_spline_interpolate_exposure_smoothness,
        t_input=keyframe_timesteps,
        t_output=frame_timesteps,
    )
    render_exposures = np.exp(log_exposure_interp)
  else:
    render_exposures = None
  return spline_indices, render_poses, render_exposures


def intrinsic_matrix(
    fx,
    fy,
    cx,
    cy,
    xnp = np,
):
  """Intrinsic matrix for a pinhole camera in OpenCV coordinate system."""
  return xnp.array([
      [fx, 0, cx],
      [0, fy, cy],
      [0, 0, 1.0],
  ])


def get_pixtocam(
    focal,
    width,
    height,
    xnp = np,
):
  """Inverse intrinsic matrix for a perfect pinhole camera."""
  # Principal point is assumed to be at the exact image center.
  camtopix = intrinsic_matrix(focal, focal, width * 0.5, height * 0.5, xnp)
  return xnp.linalg.inv(camtopix)


def pixel_coordinates(
    width, height, xnp = np
):
  """Tuple of the x and y integer coordinates for a grid of pixels."""
  return xnp.meshgrid(xnp.arange(width), xnp.arange(height), indexing='xy')


def _radial_and_tangential_distort(
    x,
    y,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
):
  """Computes the distorted pixel positions."""
  # Standard OpenCV-style radial (k1..k4) + tangential (p1, p2) model,
  # evaluated with Horner's scheme on r^2.
  r2 = x * x + y * y

  radial_distortion = r2 * (k1 + r2 * (k2 + r2 * (k3 + r2 * k4)))
  dx_radial = x * radial_distortion
  dy_radial = y * radial_distortion

  dx_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
  dy_tangential = 2 * p2 * x * y + p1 * (r2 + 2 * y * y)

  return x + dx_radial + dx_tangential, y + dy_radial + dy_tangential


def _compute_residual_and_jacobian(
    x,
    y,
    xd,
    yd,
    k1 = 0.0,
    k2 = 0.0,
    k3 = 0.0,
    k4 = 0.0,
    p1 = 0.0,
    p2 = 0.0,
):
  """Auxiliary function of radial_and_tangential_undistort()."""
  # Adapted from https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # let r(x, y) = x^2 + y^2;
  #     d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
  #               k4 * r(x, y)^4;
  r = x * x + y * y
  d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))

  # The perfect projection is:
  # xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
  # yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
  #
  # Let's define
  #
  # fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
  # fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
  #
  # We are looking for a solution that satisfies
  # fx(x, y) = fy(x, y) = 0;
  fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
  fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd

  # Compute derivative of d over [x, y]
  d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
  d_x = 2.0 * x * d_r
  d_y = 2.0 * y * d_r

  # Compute derivative of fx over x and y.
  fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
  fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y

  # Compute derivative of fy over x and y.
  fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
  fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y

  return fx, fy, fx_x, fx_y, fy_x, fy_y


def _radial_and_tangential_undistort(
    xd,
    yd,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
    eps = 1e-9,
    max_iterations=10,
    xnp = np,
):
  """Computes undistorted (x, y) from (xd, yd)."""
  # From https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # Newton's method on the distortion residual; a fixed iteration count keeps
  # the loop jax-traceable.
  # Initialize from the distorted point.
  x = xnp.copy(xd)
  y = xnp.copy(yd)

  for _ in range(max_iterations):
    fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
        x=x, y=y, xd=xd, yd=yd, k1=k1, k2=k2, k3=k3, k4=k4, p1=p1, p2=p2
    )
    denominator = fy_x * fx_y - fx_x * fy_y
    x_numerator = fx * fy_y - fy * fx_y
    y_numerator = fy * fx_x - fx * fy_x
    # Skip the update wherever the Jacobian is (near-)singular.
    step_x = xnp.where(
        xnp.abs(denominator) > eps,
        x_numerator / denominator,
        xnp.zeros_like(denominator),
    )
    step_y = xnp.where(
        xnp.abs(denominator) > eps,
        y_numerator / denominator,
        xnp.zeros_like(denominator),
    )

    x = x + step_x
    y = y + step_y

  return x, y


class ProjectionType(enum.Enum):
  """Camera projection type (perspective pinhole, fisheye, or 360 pano)."""

  PERSPECTIVE = 'perspective'
  FISHEYE = 'fisheye'
  PANORAMIC = 'pano'


def pixels_to_rays(
    pix_x_int,
    pix_y_int,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates rays given pixel coordinates, intrinsics, and extrinsics.

  Given 2D pixel coordinates pix_x_int, pix_y_int for cameras with
  inverse intrinsics pixtocams and extrinsics camtoworlds (and optional
  distortion coefficients distortion_params and NDC space projection matrix
  pixtocam_ndc), computes the corresponding 3D camera rays.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    pix_x_int: int array, shape SH, x coordinates of image pixels.
    pix_y_int: int array, shape SH, y coordinates of image pixels.
    pixtocams: float array, broadcastable to SH + [3, 3], inverse intrinsics.
    camtoworlds: float array, broadcastable to SH + [3, 4], camera extrinsics.
    distortion_params: dict of floats, optional camera distortion parameters.
    pixtocam_ndc: float array, [3, 3], optional inverse intrinsics for NDC.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    xnp: either numpy or jax.numpy.

  Returns:
    origins: float array, shape SH + [3], ray origin points.
    directions: float array, shape SH + [3], ray direction vectors.
    viewdirs: float array, shape SH + [3], normalized ray direction vectors.
    radii: float array, shape SH + [1], ray differential radii.
    imageplane: float array, shape SH + [2], xy coordinates on the image plane.
      If the image plane is at world space distance 1 from the pinhole, then
      imageplane will be the xy coordinates of a pixel in that space (so the
      camera ray direction at the origin would be (x, y, -1) in OpenGL coords).
  """

  # Must add half pixel offset to shoot rays through pixel centers.
  def pix_to_dir(x, y):
    return xnp.stack([x + 0.5, y + 0.5, xnp.ones_like(x)], axis=-1)

  # We need the dx and dy rays to calculate ray radii for mip-NeRF cones.
  pixel_dirs_stacked = xnp.stack(
      [
          pix_to_dir(pix_x_int, pix_y_int),
          pix_to_dir(pix_x_int + 1, pix_y_int),
          pix_to_dir(pix_x_int, pix_y_int + 1),
      ],
      axis=0,
  )

  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  # Apply inverse intrinsic matrices.
  camera_dirs_stacked = mat_vec_mul(pixtocams, pixel_dirs_stacked)

  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_undistort(
        camera_dirs_stacked[Ellipsis, 0],
        camera_dirs_stacked[Ellipsis, 1],
        **distortion_params,
        xnp=xnp,
    )
    camera_dirs_stacked = xnp.stack([x, y, xnp.ones_like(x)], -1)

  if camtype == ProjectionType.FISHEYE:
    theta = xnp.sqrt(xnp.sum(xnp.square(camera_dirs_stacked[Ellipsis, :2]), axis=-1))
    theta = xnp.minimum(xnp.pi, theta)
    # NOTE(review): sin(theta)/theta is 0/0 (NaN) for a ray exactly on the
    # optical axis (theta == 0); the half-pixel offset above appears to avoid
    # hitting it exactly — confirm for all intrinsics.
    sin_theta_over_theta = xnp.sin(theta) / theta
    camera_dirs_stacked = xnp.stack(
        [
            camera_dirs_stacked[Ellipsis, 0] * sin_theta_over_theta,
            camera_dirs_stacked[Ellipsis, 1] * sin_theta_over_theta,
            xnp.cos(theta),
        ],
        axis=-1,
    )
  elif camtype == ProjectionType.PANORAMIC:
    theta = camera_dirs_stacked[Ellipsis, 0]
    phi = camera_dirs_stacked[Ellipsis, 1]
    # Negation on y and z components accounts for expected OpenCV convention.
    camera_dirs_stacked = xnp.stack(
        [
            -xnp.sin(phi) * xnp.sin(theta),
            -xnp.cos(phi),
            -xnp.sin(phi) * xnp.cos(theta),
        ],
        axis=-1,
    )

  # Flip from OpenCV to OpenGL coordinate system.
  camera_dirs_stacked = matmul(
      camera_dirs_stacked, xnp.diag(xnp.array([1.0, -1.0, -1.0]))
  )

  # Extract 2D image plane (x, y) coordinates.
  imageplane = camera_dirs_stacked[0, Ellipsis, :2]

  # Apply camera rotation matrices.
  directions_stacked = mat_vec_mul(
      camtoworlds[Ellipsis, :3, :3], camera_dirs_stacked
  )
  # Extract the offset rays.
  directions, dx, dy = directions_stacked

  origins = xnp.broadcast_to(camtoworlds[Ellipsis, :3, -1], directions.shape)
  viewdirs = directions / xnp.linalg.norm(directions, axis=-1, keepdims=True)

  if pixtocam_ndc is None:
    # Distance from each unit-norm direction vector to its neighbors.
    dx_norm = xnp.linalg.norm(dx - directions, axis=-1)
    dy_norm = xnp.linalg.norm(dy - directions, axis=-1)
  else:
    # Convert ray origins and directions into projective NDC space.
    ndc_fn = functools.partial(convert_to_ndc, pixtocam=pixtocam_ndc, xnp=xnp)
    origins_dx, _ = ndc_fn(origins, dx)
    origins_dy, _ = ndc_fn(origins, dy)
    origins, directions = ndc_fn(origins, directions)

    # In NDC space, we use the offset between origins instead of directions.
    dx_norm = xnp.linalg.norm(origins_dx - origins, axis=-1)
    dy_norm = xnp.linalg.norm(origins_dy - origins, axis=-1)

  # Cut the distance in half, multiply it to match the variance of a uniform
  # distribution the size of a pixel (1/12, see paper).
  # TODO(barron): Add a unit test that this is correct.
  radii = (0.5 * (dx_norm + dy_norm))[Ellipsis, None] * 2 / xnp.sqrt(12)

  return origins, directions, viewdirs, radii, imageplane


def points_to_pixels(
    points,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates pixel coordinates given 3D points, intrinsics, and extrinsics.

  Given 3D point coordinates points and cameras with inverse intrinsics
  pixtocams and extrinsics camtoworlds (and optional distortion coefficients
  distortion_params), computes the corresponding 2D pixel coordinates.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    points: float array, [..., 3], 3D coordinates of points to project.
    pixtocams: float array, [..., 3, 3], inverse intrinsics.
    camtoworlds: float array, [..., 3, 4], camera extrinsics.
    distortion_params: dict of floats or float arrays [...], optional camera
      distortion parameters.
    camtype: camera_utils.ProjectionType, type of camera model.
    xnp: either numpy (host compute) or jax.numpy (device compute).

  Returns:
    coordinates: float array, [..., 2], pixel coordinates.
    depth: float array, [...], per-point orthographic depth.
  """
  if camtype != ProjectionType.PERSPECTIVE:
    raise ValueError(f'points_to_pixels only supports perspective projection, '
                     f'not {camtype} mode.')

  # For jax, need to specify high-precision matmul.
matmul = math.matmul if xnp == jnp else xnp.matmul mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0] rotation = camtoworlds[Ellipsis, :3, :3] rotation_inv = xnp.swapaxes(rotation, -1, -2) translation = camtoworlds[Ellipsis, :3, -1] # Points (directions) in the camera coordinate frame. points_camera = mat_vec_mul(rotation_inv, points - translation) # Projection to image plane by dividing out -z. depth = -points_camera[Ellipsis, -1] camera_dirs = points_camera / depth[Ellipsis, None] # OpenGL to OpenCV coordinates. camera_dirs = matmul(camera_dirs, xnp.diag(xnp.array([1.0, -1.0, -1.0]))) if distortion_params is not None: # Correct for distortion. x, y = _radial_and_tangential_distort( camera_dirs[Ellipsis, 0], camera_dirs[Ellipsis, 1], **distortion_params, ) camera_dirs = xnp.stack([x, y, xnp.ones_like(x)], -1) # Apply intrinsics matrix. pixel_dirs = mat_vec_mul(xnp.linalg.inv(pixtocams), camera_dirs) # Remove half pixel offset. coordinates = pixel_dirs[Ellipsis, :2] - xnp.array([0.5, 0.5]) return coordinates, depth def rays_planes_intersection( z_min, z_max, origins, directions, xnp = np, ): """Crops rays to a range of z values. This is useful for situations where the scene lies within a range of altitudes, but the cameras are very far away, as with aerial data. Args: z_min: float z value of the lower cropping plane. z_max: float z value of the upper cropping plane. origins: ray origins points. directions: ray direction vectors. xnp: either numpy or jax.numpy. 
Returns: t_min: parametric location of the cropped ray origins t_max: parametric location of the ends of the cropped rays """ t1 = (z_min - origins[Ellipsis, 2]) / directions[Ellipsis, 2] t2 = (z_max - origins[Ellipsis, 2]) / directions[Ellipsis, 2] t_min = xnp.maximum(0, xnp.minimum(t1, t2)) t_max = xnp.maximum(t1, t2) return t_min, t_max def _intersect_ranges( r1, r2, xnp = np, ): start = xnp.maximum(r1[0], r2[0]) end = xnp.minimum(r1[1], r2[1]) return (start, end) def ray_box_intersection( ray_o, ray_d, corners, xnp = np ): """Returns enter/exit distances along the ray for box defined by `corners`.""" t1 = (corners[0] - ray_o) / ray_d t2 = (corners[1] - ray_o) / ray_d t_min = xnp.minimum(t1, t2).max(axis=-1) t_max = xnp.maximum(t1, t2).min(axis=-1) return t_min, t_max def modify_rays_with_bbox( rays, corners, xnp = np ): """Sets near/far by bbox intersection and multiplies lossmult by mask.""" lossmult = rays.lossmult near = rays.near far = rays.far t_min, t_max = ray_box_intersection( rays.origins, rays.directions, corners, xnp=xnp ) t_min, t_max = t_min[Ellipsis, None], t_max[Ellipsis, None] hits = t_min <= t_max inear, ifar = _intersect_ranges((near, far), (t_min, t_max), xnp=xnp) overlaps = inear <= ifar valid = hits * overlaps if lossmult is None: lossmult = valid.astype(xnp.float32) else: lossmult = xnp.where(valid, lossmult, 0.0) near = xnp.where(valid, inear, 0.0) far = xnp.where(valid, ifar, 0.0) return rays.replace(lossmult=lossmult, near=near, far=far) def ray_sphere_intersection( ray_o, ray_d, center, radius, xnp = np, ): """Calculates distance to hit a sphere for a ray. Args: ray_o: Ray origin (..., 3) ray_d: Ray direction (..., 3) center: Sphere center (..., 3) radius: Sphere radius (..., 1) xnp: Numpy or Jax module Returns: t_min, t_max, hit. When no hit is found, t_min = t_max = 0. 
""" oc = ray_o - center a = (ray_d**2).sum(axis=-1) b = 2 * (oc * ray_d).sum(axis=-1) c = (oc * oc).sum(axis=-1) - radius**2 det = b**2 - 4.0 * a * c hit = (det >= 0) * (a > 0) # Nb: Results are 'wrong' if valid = false, this is just to make jax # not freak out. det = xnp.where(hit, det, 0.0) a = xnp.where(hit, a, 1.0) t_min = xnp.where(hit, (-b - xnp.sqrt(det)) / (2.0 * a), 0.0) t_max = xnp.where(hit, (-b + xnp.sqrt(det)) / (2.0 * a), 0.0) return t_min, t_max, hit def gather_cameras(cameras, cam_idx, xnp=np): """Gathers relevant camera parameters for each ray.""" pixtocams, camtoworlds, distortion_params = cameras[:3] if pixtocams.ndim > 2: pixtocams_idx = pixtocams[cam_idx] else: pixtocams_idx = pixtocams if camtoworlds.ndim > 2: camtoworlds_idx = camtoworlds[cam_idx] else: camtoworlds_idx = camtoworlds if distortion_params is not None: distortion_params_idx = {} for k, v in distortion_params.items(): # pytype: disable=attribute-error # jax-ndarray if not xnp.isscalar(v): distortion_params_idx[k] = v[cam_idx] else: distortion_params_idx[k] = v else: distortion_params_idx = None return ( pixtocams_idx, camtoworlds_idx, distortion_params_idx, ) def cast_ray_batch( cameras, rays, camtype = ProjectionType.PERSPECTIVE, scene_bbox = None, xnp = np, ): """Maps from input cameras and uncast Rays batch to output cast Rays batch. `cameras` is a Tuple of five sets of camera parameters. pixtocams: 1 or N stacked [3, 3] inverse intrinsic matrices. camtoworlds: 1 or N stacked [3, 4] extrinsic pose matrices. distortion_params: optional, dict[str, float] containing pinhole model distortion parameters. pixtocam_ndc: optional, [3, 3] inverse intrinsic matrix for mapping to NDC. z_range: optional range of Z values Args: cameras: described above. rays: ray data including integer pixel coordinates and camera indices. These fields can be an arbitrary batch shape. camtype: camera_utils.ProjectionType, fisheye or perspective camera. 
scene_bbox: min and max corner of scene bounding box, if applicable. xnp: either numpy or jax.numpy. Returns: rays: Rays dataclass with computed 3D world space ray data. """ # rays.cam_idx has shape [..., 1], remove this hanging dimension. cam_idx = rays.cam_idx[Ellipsis, 0] cameras_idx = gather_cameras(cameras, cam_idx, xnp=xnp) pixtocams, camtoworlds, distortion_params = cameras_idx pixtocam_ndc, z_range = cameras[3:5] # Compute rays from pixel coordinates. origins, directions, viewdirs, radii, imageplane = pixels_to_rays( rays.pixels[Ellipsis, 0], rays.pixels[Ellipsis, 1], pixtocams, camtoworlds, distortion_params=distortion_params, pixtocam_ndc=pixtocam_ndc, camtype=camtype, xnp=xnp, ) if z_range is not None: t_min, t_max = rays_planes_intersection( z_range[0], z_range[1], origins, directions, xnp ) t_min = xnp.broadcast_to(t_min[Ellipsis, None], origins.shape) t_max = xnp.broadcast_to(t_max[Ellipsis, None], origins.shape) hit_mask = t_max < t_min origins = xnp.where(hit_mask, origins, origins + directions * t_min) directions = xnp.where(hit_mask, directions, directions * (t_max - t_min)) # Preserve all metadata and add the cast rays. 
rays = rays.replace( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, ) if scene_bbox is not None: rays = modify_rays_with_bbox(rays, scene_bbox, xnp=xnp) return rays def cast_general_rays( camtoworld, pixtocam, height, width, near, far, distortion_params = None, pixtocam_ndc = None, camtype = ProjectionType.PERSPECTIVE, xnp = np, ): """Wrapper for generating a general ray batch.""" pix_x_int, pix_y_int = pixel_coordinates(width, height, xnp=xnp) ray_args = pixels_to_rays( pix_x_int, pix_y_int, pixtocam, camtoworld, distortion_params=distortion_params, pixtocam_ndc=pixtocam_ndc, camtype=camtype, xnp=xnp, ) broadcast_scalar = lambda x: xnp.broadcast_to(x, pix_x_int.shape)[Ellipsis, None] ray_kwargs = { 'pixels': xnp.stack([pix_x_int, pix_y_int], axis=-1), 'near': broadcast_scalar(near), 'far': broadcast_scalar(far), 'cam_idx': broadcast_scalar(0), } return utils.Rays(*ray_args, **ray_kwargs) def cast_pinhole_rays( camtoworld, height, width, focal, near, far, xnp = np, ): """Generates a pinhole camera ray batch (w/o distortion).""" return cast_general_rays( camtoworld, get_pixtocam(focal, width, height, xnp=xnp), height, width, near, far, camtype=ProjectionType.PERSPECTIVE, xnp=xnp, ) def cast_spherical_rays( camtoworld, height, width, near, far, xnp, ): """Generates a spherical camera ray batch.""" return cast_general_rays( camtoworld, xnp.diag(xnp.array([2.0 * np.pi / width, np.pi / height, 1.0])), height, width, near, far, camtype=ProjectionType.PANORAMIC, xnp=xnp, ) def jax_camera_from_tuple( camera_tuple, image_size, projection_type, ): """Converts a camera tuple into a JAX camera. Args: camera_tuple: A tuple containing `inv_intrinsics`, the inverse intrinsics matrix; `extrinsics`, the camera to world matrix; and `distortion_params`, the dictionary of distortion parameters. image_size: An array containing the (width, height) image size. projection_type: The projection type of the camera. 
Returns: A JAX camera class instance encoding the same camera information. """ if projection_type.value not in { ProjectionType.PERSPECTIVE.value, ProjectionType.FISHEYE.value, }: raise ValueError(f'Projection {projection_type} is not supported.') inv_intrinsics, extrinsic, distortion_params = camera_tuple[:3] intrinsics = jnp.linalg.inv(inv_intrinsics) focal_length = intrinsics[0, 0] principal_point = intrinsics[:2, 2] pixel_aspect_ratio = intrinsics[1, 1] / intrinsics[0, 0] radial_distortion = None tangential_distortion = None if distortion_params is not None: if ( 'k1' in distortion_params and 'k2' in distortion_params and 'k3' in distortion_params ): radial_keys = ['k1', 'k2', 'k3', 'k4'] radial_distortion = jnp.array( [distortion_params[k] for k in radial_keys if k in distortion_params] ) if 'p1' in distortion_params and 'p2' in distortion_params: tangential_distortion = jnp.array([ distortion_params['p1'], distortion_params['p2'], ]) extrinsic = jnp.concatenate( [extrinsic[:3, :4], jnp.array([[0, 0, 0, 1]])], axis=0 ) # Convert to OpenCV coordinates. 
extrinsic = math.matmul(extrinsic, jnp.diag(jnp.array([1, -1, -1, 1]))) world_to_cam = jnp.linalg.inv(extrinsic) camera = jaxcam.Camera.create( focal_length=focal_length, pixel_aspect_ratio=pixel_aspect_ratio, radial_distortion=radial_distortion, tangential_distortion=tangential_distortion, principal_point=principal_point, image_size=image_size, is_fisheye=(projection_type.value == ProjectionType.FISHEYE.value), ) camera = jaxcam.update_world_to_camera_matrix(camera, world_to_cam) return camera def tuple_from_jax_camera( jax_camera, ): """Converts a JAX camera into a camera tuple.""" focal_x = jax_camera.focal_length focal_y = jax_camera.focal_length * jax_camera.pixel_aspect_ratio intrinsic = jnp.block([ [focal_x, jax_camera.skew, jax_camera.principal_point[0]], [0, focal_y, jax_camera.principal_point[1]], [0, 0, 1], ]) pix_to_cam = jnp.linalg.inv(intrinsic) world_to_cam = jaxcam.world_to_camera_matrix(jax_camera) cam_to_world = jnp.linalg.inv(world_to_cam) # Convert back to OpenGL coordinates. cam_to_world = math.matmul(cam_to_world, jnp.diag(jnp.array([1, -1, -1, 1]))) cam_to_world = cam_to_world[:3, :] distortion_params = None if jax_camera.has_distortion: distortion_params = {} if jax_camera.has_radial_distortion: distortion_params.update({ 'k1': jax_camera.radial_distortion[0], 'k2': jax_camera.radial_distortion[1], 'k3': jax_camera.radial_distortion[2], 'k4': jax_camera.radial_distortion[3], }) if jax_camera.has_tangential_distortion: distortion_params.update({ 'p1': jax_camera.tangential_distortion[0], 'p2': jax_camera.tangential_distortion[1], }) return pix_to_cam, cam_to_world, distortion_params def rotation_distance( rotation_mat1, rotation_mat2 ): """Computes the angle between two rotation matrices in degrees. Args: rotation_mat1: (3, 3) The first batch of rotation matrix. rotation_mat2: (3, 3) The second batch of rotation matrix. Returns: The angle in degrees between 0 and 180. 
""" axis_angle1 = rigid_body.log_so3(rotation_mat1) axis_angle2 = rigid_body.log_so3(rotation_mat2) orientation_error_deg = jnp.degrees( jnp.linalg.norm(axis_angle1 - axis_angle2, axis=-1) ) return jnp.where( # pytype: disable=bad-return-type # jnp-type orientation_error_deg < 180, orientation_error_deg, 360 - orientation_error_deg, ) def compute_camera_metrics( cameras_gt, cameras_pred ): """Computes the metrics between two cameras.""" orientation_diffs = jax.vmap(rotation_distance)( cameras_pred.orientation, cameras_gt.orientation ) translation_diffs = jnp.abs(cameras_pred.translation - cameras_gt.translation) diffs = { 'focal_length': jnp.abs( cameras_pred.focal_length - cameras_gt.focal_length ), 'position': jnp.linalg.norm( cameras_pred.position - cameras_gt.position, axis=-1 ), 'translation_x': translation_diffs[Ellipsis, 0], 'translation_y': translation_diffs[Ellipsis, 1], 'translation_z': translation_diffs[Ellipsis, 2], 'orientation': jnp.abs(orientation_diffs), 'principal_points': jnp.linalg.norm( cameras_pred.principal_point - cameras_gt.principal_point, axis=-1, ), } if cameras_pred.radial_distortion is not None: radial_distortion_gt = jnp.zeros(4) if cameras_gt.has_radial_distortion: radial_distortion_gt = cameras_gt.radial_distortion for i in range(cameras_pred.radial_distortion.shape[-1]): diffs[f'radial_distortion_{i}'] = jnp.abs( cameras_pred.radial_distortion[Ellipsis, i] - radial_distortion_gt[Ellipsis, i] ) if cameras_pred.tangential_distortion is not None: tangential_distortion_gt = jnp.zeros(2) if cameras_gt.has_tangential_distortion: tangential_distortion_gt = cameras_gt.radial_distortion for i in range(cameras_pred.tangential_distortion.shape[-1]): diffs[f'tangential_distortion_{i}'] = jnp.abs( cameras_pred.tangential_distortion[Ellipsis, i] - tangential_distortion_gt[Ellipsis, i] ) return diffs def perturb_cameras( rng, cameras, sigma_look_at, sigma_position, sigma_focal_length = 0.0, sigma_dolly_z = 0.0, single_dolly = True, 
dolly_use_average = False, ): """Randomly perturb camera positions and orientations. For position the 3D coordinate is simply shifted according to an offset vector. For the orientation an offset angle is calculated based on spherical coordinates. The underlying offsets are randomly chosen using normal distributions absed on the input sigmas. Args: rng: A PRNGKey. cameras: Cameras to perturb. sigma_look_at: Strength of look-at position offset. Higher means stronger. sigma_position: Strength of position offset. Higher means stronger. sigma_focal_length: Strength of focal length zoom z-axis scale. Higher means stronger. This is essentially a percentage (0.2 means 20%). sigma_dolly_z: Strength of Dolly zoom z-axis scale. Higher means stronger. This is essentially a percentage (0.2 means 20%). single_dolly: If True, only have a single perturbation for dolly zoom. dolly_use_average: If True, set the dolly z to the average of the input instead of perturbing. Returns: Perturbed cameras. """ # Dolly zoom. if sigma_dolly_z > 0.0 or dolly_use_average: # Turn out "percentage" into a log scale. This is equivalent to having # minval = log(1+s) and maxval = log(1/(1+s)) but sampling from a normal # distribution. log_sigma_dolly_z = jnp.log1p(sigma_dolly_z) rng, dolly_key = random.split(rng) translation = cameras.translation x, y, z = jnp.split(translation, 3, -1) if dolly_use_average: new_z = jnp.broadcast_to(z.mean(axis=0, keepdims=True), z.shape) elif single_dolly: new_z = z * jnp.exp(random.normal(dolly_key, (1,)) * log_sigma_dolly_z) else: new_z = z * jnp.exp(random.normal(dolly_key, z.shape) * log_sigma_dolly_z) new_focal_length = cameras.focal_length * (new_z / z).squeeze(-1) new_translation = jnp.concatenate([x, y, new_z], axis=-1) new_position = jax.vmap(spin_math.matmul)( -cameras.orientation.swapaxes(-1, -2), new_translation ) cameras = cameras.replace( position=new_position, focal_length=new_focal_length ) # Perturb focal length. 
rng, key = random.split(rng) new_focal_length = cameras.focal_length * jnp.exp( random.normal(key, cameras.shape) * jnp.log1p(sigma_focal_length) ) cameras = cameras.replace(focal_length=new_focal_length) camera_positions = cameras.position up_vectors = -cameras.orientation[Ellipsis, 1, :] # Perturb camera positions. rng, key = random.split(rng) perturb_dir = spin_math.normalize(random.normal(key, camera_positions.shape)) camera_positions_perturbed = np.array( sigma_position * perturb_dir + camera_positions ) # Perturb look-at point. look_at_positions = jax.vmap(geometry.line_closest_point)( cameras.position, cameras.optical_axis, jnp.zeros_like(cameras.position) ) rng, key = random.split(rng) perturb_dir = math.normalize(random.normal(key, camera_positions.shape)) look_at_positions_perturbed = np.array( sigma_look_at * perturb_dir + look_at_positions ) # Apply the look-at function. new_cameras = [] for camera, camera_position, look_at_position, up_vector in zip( cameras, camera_positions_perturbed, look_at_positions_perturbed, up_vectors, ): new_cameras.append( jaxcam.look_at( camera=camera, eye=camera_position, center=look_at_position, world_up=up_vector, ) ) cameras = jaxcam.concatenate(new_cameras) return cameras
evocodebench_data_251
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions and transforms for rigid body dynamics. Many equations are from the Modern Robotics textbook available online at: http://hades.mech.northwestern.edu/index.php/Modern_Robotics Note that many operations here use a `jnp.where` to avoid evaluating at numerically unstable or undefined regions of the domain. In addition, to avoid NaNs accumulating through `jnp.where` expressions of unsafe math operations, we also wrap the argument of those operations in another `jnp.where` call See: https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where """ from typing import Tuple from internal import quaternion as quat_lib from internal import spin_math import jax from jax import numpy as jnp import optax def _safe_sqrt(x): """safe_sqrt with the value at zero set to eps to avoid divide by zero.""" return spin_math.safe_sqrt(x, value_at_zero=jnp.finfo(jnp.float32).eps) @jax.jit def skew(w): """Build a skew matrix ("cross product matrix") for vector w. Modern Robotics Eqn 3.30. Args: w: (3,) A 3-vector Returns: W: (3, 3) A skew matrix such that W @ v == w x v """ w = jnp.reshape(w, (3)) return jnp.array([[0.0, -w[2], w[1]], [w[2], 0.0, -w[0]], [-w[1], w[0], 0.0]]) def unskew(W): """Convert a skew matrix to a vector w. See `skew()` for documentation. Args: W: (3, 3) A skew matrix. Returns: w: (3,) A 3-vector corresponding to the skew matrix. 
""" return jnp.stack([W[2, 1], W[0, 2], W[1, 0]], axis=-1) def rp_to_se3(R, p): """Rotation and translation to homogeneous transform. Args: R: (3, 3) An orthonormal rotation matrix. p: (3,) A 3-vector representing an offset. Returns: X: (4, 4) The homogeneous transformation matrix described by rotating by R and translating by p. """ p = jnp.reshape(p, (3, 1)) return jnp.block([[R, p], [jnp.array([[0.0, 0.0, 0.0, 1.0]])]]) def se3_to_rp(X): """Converts a homogeneous transform to a rotation and translation. Args: X: (4, 4) A homogeneous transformation matrix. Returns: R: (3, 3) An orthonormal rotation matrix. p: (3,) A 3-vector representing an offset. """ R = X[Ellipsis, :3, :3] p = X[Ellipsis, :3, 3] return R, p def exp_so3( axis_angle, eps=jnp.finfo(jnp.float32).eps ): """Exponential map from Lie algebra so3 to Lie group SO3. Modern Robotics Eqn 3.51, a.k.a. Rodrigues' formula. Args: axis_angle: A 3-vector where the direction is the axis of rotation and the magnitude is the angle of rotation. eps: an epsilon value for numerical stability. Returns: R: (3, 3) An orthonormal rotation matrix representing the same rotation. """ theta_squared = jnp.sum(axis_angle**2, axis=-1) theta = _safe_sqrt(theta_squared) # Near zero, we switch to using the first order Taylor expansion. R_taylor = jnp.eye(3) + skew(axis_angle) # Prevent bad gradients from propagating back when theta is small. axis_angle_safe = jnp.where(theta_squared > eps**2, axis_angle, 0.0) theta_safe = jnp.where(theta_squared > eps**2, theta, 1.0) axis = axis_angle_safe / theta_safe W = skew(axis) R = ( jnp.eye(3) + jnp.sin(theta_safe) * W + (1.0 - jnp.cos(theta_safe)) * spin_math.matmul(W, W) ) return jnp.where(theta_squared > eps**2, R, R_taylor) def log_so3(R, eps=jnp.finfo(jnp.float32).eps): """Matrix logarithm from the Lie group SO3 to the Lie algebra so3. Modern Robotics Eqn 3.53. Args: R: (3, 3) An orthonormal rotation matrix. eps: an epsilon value for numerical stability. 
Returns: w: (3,) The unit vector representing the axis of rotation. theta: The angle of rotation. """ q = quat_lib.from_rotation_matrix(R, eps) axis_angle = quat_lib.to_axis_angle(q, eps) return axis_angle def exp_se3( screw_axis, eps=jnp.finfo(jnp.float32).eps ): """Exponential map from Lie algebra so3 to Lie group SO3. Modern Robotics Eqn 3.88. Args: screw_axis: A 6-vector encoding a screw axis of motion. This can be broken down into [w, v] where w is an angle-axis rotation and v represents a translation. ||w|| corresponds to the magnitude of motion. eps: an epsilon value for numerical stability. Returns: a_X_b: (4, 4) The homogeneous transformation matrix attained by integrating motion of magnitude theta about S for one second. """ w, v = jnp.split(screw_axis, 2) R = exp_so3(w) theta_squared = jnp.sum(w**2, axis=-1) theta = _safe_sqrt(theta_squared) W = skew(w / theta) # Note that p = 0 when theta = 0. p = spin_math.matmul( ( theta * jnp.eye(3) + (1.0 - jnp.cos(theta)) * W + (theta - jnp.sin(theta)) * spin_math.matmul(W, W) ), v / theta, ) # If theta^2 is close to 0 it means this is a pure translation so p = v. p = jnp.where(theta_squared > eps**2, p, v) return rp_to_se3(R, p) def log_se3(a_X_b, eps=jnp.finfo(jnp.float32).eps): """Matrix logarithm from the Lie group SE3 to the Lie algebra se3. Modern Robotics Eqn 3.91-3.92. Args: a_X_b: (4,4) A homogeneous transformation matrix. eps: an epsilon value for numerical stability. Returns: screw_axis: A 6-vector encoding a screw axis of motion. This can be broken down into [w, v] where w is an angle-axis rotation and v represents a translation. The ||w|| and ||v|| both correspond to the magnitude of motion. 
""" R, p = se3_to_rp(a_X_b) w = log_so3(R, eps) theta_squared = jnp.sum(w**2, axis=-1) theta = spin_math.safe_sqrt(theta_squared) W = skew(w / theta) G_inv1 = jnp.eye(3) G_inv2 = theta * -W / 2.0 G_inv3 = (1.0 - 0.5 * theta / jnp.tan(theta / 2.0)) * spin_math.matmul(W, W) G_inv = G_inv1 + G_inv2 + G_inv3 v = spin_math.matmul(G_inv, p[Ellipsis, jnp.newaxis]).squeeze(-1) # If theta = 0 then the transformation is a pure translation and v = p. # This avoids using the numerically unstable G matrix when theta is near zero. v = jnp.where(theta_squared > eps, v, p) S = jnp.concatenate([w, v], axis=-1) return S def rts_to_sim3( rotation, translation, scale ): """Converts a rotation, translation and scale to a homogeneous transform. Args: rotation: (3, 3) An orthonormal rotation matrix. translation: (3,) A 3-vector representing a translation. scale: A scalar factor. Returns: (4, 4) A homogeneous transformation matrix. """ transform = jnp.eye(4) transform = transform.at[:3, :3].set(rotation * scale) transform = transform.at[:3, 3].set(translation) return transform def sim3_to_rts( transform, ): """Converts a homogeneous transform to rotation, translation and scale. Args: transform: (4, 4) A homogeneous transformation matrix. Returns: rotation: (3, 3) An orthonormal rotation matrix. translation: (3,) A 3-vector representing a translation. scale: A scalar factor. """ eps = jnp.float32(jnp.finfo(jnp.float32).tiny) rotation_scale = transform[Ellipsis, :3, :3] # Assumes rotation is an orthonormal transform, thus taking norm of first row. 
scale = optax.safe_norm(rotation_scale, min_norm=eps, axis=1)[0] rotation = rotation_scale / scale translation = transform[Ellipsis, :3, 3] return rotation, translation, scale def ortho6d_from_rotation_matrix(rotation_matrix): """Converts a matrix to an ortho6d by taking the first two columns.""" return rotation_matrix[Ellipsis, :2, :].reshape(*rotation_matrix.shape[:-2], 6) def rotation_matrix_from_ortho6d(ortho6d): """Computes the 3D rotation matrix from the 6D representation. Zhou et al. have proposed a novel 6D representation for the rotation in SO(3) which is completely continuous. This is highly benificial and produces better results than most standard rotation representations for many tasks, especially when the predicted value is close to the discontinuity of the utilized rotation represantation. This function converts from the proposed 6 dimensional representation to the classic 3x3 rotation matrix. See https://arxiv.org/pdf/1812.07035.pdf for more information. Args: ortho6d: 6D represantion for the rotation according Zhou et al. of shape [6]. Returns: (3, 3) The associated 3x3 rotation matrices. """ if ortho6d.ndim != 1 or ortho6d.shape[0] != 6: raise ValueError('The shape of the input ortho 6D vector needs to be (6).') a1, a2 = ortho6d[Ellipsis, :3], ortho6d[Ellipsis, 3:] b1 = spin_math.normalize(a1) b2 = a2 - jnp.sum(b1 * a2, axis=-1, keepdims=True) * b1 b2 = spin_math.normalize(b2) b3 = jnp.cross(b1, b2) return jnp.stack((b1, b2, b3), axis=-2)
evocodebench_data_252
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions and transforms for rigid body dynamics. Many equations are from the Modern Robotics textbook available online at: http://hades.mech.northwestern.edu/index.php/Modern_Robotics Note that many operations here use a `jnp.where` to avoid evaluating at numerically unstable or undefined regions of the domain. In addition, to avoid NaNs accumulating through `jnp.where` expressions of unsafe math operations, we also wrap the argument of those operations in another `jnp.where` call See: https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where """ from typing import Tuple from internal import quaternion as quat_lib from internal import spin_math import jax from jax import numpy as jnp import optax def _safe_sqrt(x): """safe_sqrt with the value at zero set to eps to avoid divide by zero.""" return spin_math.safe_sqrt(x, value_at_zero=jnp.finfo(jnp.float32).eps) @jax.jit def skew(w): """Build a skew matrix ("cross product matrix") for vector w. Modern Robotics Eqn 3.30. Args: w: (3,) A 3-vector Returns: W: (3, 3) A skew matrix such that W @ v == w x v """ w = jnp.reshape(w, (3)) return jnp.array([[0.0, -w[2], w[1]], [w[2], 0.0, -w[0]], [-w[1], w[0], 0.0]]) def unskew(W): """Convert a skew matrix to a vector w. See `skew()` for documentation. Args: W: (3, 3) A skew matrix. Returns: w: (3,) A 3-vector corresponding to the skew matrix. 
""" return jnp.stack([W[2, 1], W[0, 2], W[1, 0]], axis=-1) def rp_to_se3(R, p): """Rotation and translation to homogeneous transform. Args: R: (3, 3) An orthonormal rotation matrix. p: (3,) A 3-vector representing an offset. Returns: X: (4, 4) The homogeneous transformation matrix described by rotating by R and translating by p. """ p = jnp.reshape(p, (3, 1)) return jnp.block([[R, p], [jnp.array([[0.0, 0.0, 0.0, 1.0]])]]) def se3_to_rp(X): """Converts a homogeneous transform to a rotation and translation. Args: X: (4, 4) A homogeneous transformation matrix. Returns: R: (3, 3) An orthonormal rotation matrix. p: (3,) A 3-vector representing an offset. """ R = X[Ellipsis, :3, :3] p = X[Ellipsis, :3, 3] return R, p def exp_so3( axis_angle, eps=jnp.finfo(jnp.float32).eps ): """Exponential map from Lie algebra so3 to Lie group SO3. Modern Robotics Eqn 3.51, a.k.a. Rodrigues' formula. Args: axis_angle: A 3-vector where the direction is the axis of rotation and the magnitude is the angle of rotation. eps: an epsilon value for numerical stability. Returns: R: (3, 3) An orthonormal rotation matrix representing the same rotation. """ theta_squared = jnp.sum(axis_angle**2, axis=-1) theta = _safe_sqrt(theta_squared) # Near zero, we switch to using the first order Taylor expansion. R_taylor = jnp.eye(3) + skew(axis_angle) # Prevent bad gradients from propagating back when theta is small. axis_angle_safe = jnp.where(theta_squared > eps**2, axis_angle, 0.0) theta_safe = jnp.where(theta_squared > eps**2, theta, 1.0) axis = axis_angle_safe / theta_safe W = skew(axis) R = ( jnp.eye(3) + jnp.sin(theta_safe) * W + (1.0 - jnp.cos(theta_safe)) * spin_math.matmul(W, W) ) return jnp.where(theta_squared > eps**2, R, R_taylor) def log_so3(R, eps=jnp.finfo(jnp.float32).eps): """Matrix logarithm from the Lie group SO3 to the Lie algebra so3. Modern Robotics Eqn 3.53. Args: R: (3, 3) An orthonormal rotation matrix. eps: an epsilon value for numerical stability. 
Returns: w: (3,) The unit vector representing the axis of rotation. theta: The angle of rotation. """ q = quat_lib.from_rotation_matrix(R, eps) axis_angle = quat_lib.to_axis_angle(q, eps) return axis_angle def exp_se3( screw_axis, eps=jnp.finfo(jnp.float32).eps ): """Exponential map from Lie algebra so3 to Lie group SO3. Modern Robotics Eqn 3.88. Args: screw_axis: A 6-vector encoding a screw axis of motion. This can be broken down into [w, v] where w is an angle-axis rotation and v represents a translation. ||w|| corresponds to the magnitude of motion. eps: an epsilon value for numerical stability. Returns: a_X_b: (4, 4) The homogeneous transformation matrix attained by integrating motion of magnitude theta about S for one second. """ w, v = jnp.split(screw_axis, 2) R = exp_so3(w) theta_squared = jnp.sum(w**2, axis=-1) theta = _safe_sqrt(theta_squared) W = skew(w / theta) # Note that p = 0 when theta = 0. p = spin_math.matmul( ( theta * jnp.eye(3) + (1.0 - jnp.cos(theta)) * W + (theta - jnp.sin(theta)) * spin_math.matmul(W, W) ), v / theta, ) # If theta^2 is close to 0 it means this is a pure translation so p = v. p = jnp.where(theta_squared > eps**2, p, v) return rp_to_se3(R, p) def log_se3(a_X_b, eps=jnp.finfo(jnp.float32).eps): """Matrix logarithm from the Lie group SE3 to the Lie algebra se3. Modern Robotics Eqn 3.91-3.92. Args: a_X_b: (4,4) A homogeneous transformation matrix. eps: an epsilon value for numerical stability. Returns: screw_axis: A 6-vector encoding a screw axis of motion. This can be broken down into [w, v] where w is an angle-axis rotation and v represents a translation. The ||w|| and ||v|| both correspond to the magnitude of motion. 
""" R, p = se3_to_rp(a_X_b) w = log_so3(R, eps) theta_squared = jnp.sum(w**2, axis=-1) theta = spin_math.safe_sqrt(theta_squared) W = skew(w / theta) G_inv1 = jnp.eye(3) G_inv2 = theta * -W / 2.0 G_inv3 = (1.0 - 0.5 * theta / jnp.tan(theta / 2.0)) * spin_math.matmul(W, W) G_inv = G_inv1 + G_inv2 + G_inv3 v = spin_math.matmul(G_inv, p[Ellipsis, jnp.newaxis]).squeeze(-1) # If theta = 0 then the transformation is a pure translation and v = p. # This avoids using the numerically unstable G matrix when theta is near zero. v = jnp.where(theta_squared > eps, v, p) S = jnp.concatenate([w, v], axis=-1) return S def rts_to_sim3( rotation, translation, scale ): """Converts a rotation, translation and scale to a homogeneous transform. Args: rotation: (3, 3) An orthonormal rotation matrix. translation: (3,) A 3-vector representing a translation. scale: A scalar factor. Returns: (4, 4) A homogeneous transformation matrix. """ transform = jnp.eye(4) transform = transform.at[:3, :3].set(rotation * scale) transform = transform.at[:3, 3].set(translation) return transform def sim3_to_rts( transform, ): """Converts a homogeneous transform to rotation, translation and scale. Args: transform: (4, 4) A homogeneous transformation matrix. Returns: rotation: (3, 3) An orthonormal rotation matrix. translation: (3,) A 3-vector representing a translation. scale: A scalar factor. """ eps = jnp.float32(jnp.finfo(jnp.float32).tiny) rotation_scale = transform[Ellipsis, :3, :3] # Assumes rotation is an orthonormal transform, thus taking norm of first row. 
scale = optax.safe_norm(rotation_scale, min_norm=eps, axis=1)[0] rotation = rotation_scale / scale translation = transform[Ellipsis, :3, 3] return rotation, translation, scale def ortho6d_from_rotation_matrix(rotation_matrix): """Converts a matrix to an ortho6d by taking the first two columns.""" return rotation_matrix[Ellipsis, :2, :].reshape(*rotation_matrix.shape[:-2], 6) def rotation_matrix_from_ortho6d(ortho6d): """Computes the 3D rotation matrix from the 6D representation. Zhou et al. have proposed a novel 6D representation for the rotation in SO(3) which is completely continuous. This is highly benificial and produces better results than most standard rotation representations for many tasks, especially when the predicted value is close to the discontinuity of the utilized rotation represantation. This function converts from the proposed 6 dimensional representation to the classic 3x3 rotation matrix. See https://arxiv.org/pdf/1812.07035.pdf for more information. Args: ortho6d: 6D represantion for the rotation according Zhou et al. of shape [6]. Returns: (3, 3) The associated 3x3 rotation matrices. """ if ortho6d.ndim != 1 or ortho6d.shape[0] != 6: raise ValueError('The shape of the input ortho 6D vector needs to be (6).') a1, a2 = ortho6d[Ellipsis, :3], ortho6d[Ellipsis, 3:] b1 = spin_math.normalize(a1) b2 = a2 - jnp.sum(b1 * a2, axis=-1, keepdims=True) * b1 b2 = spin_math.normalize(b2) b3 = jnp.cross(b1, b2) return jnp.stack((b1, b2, b3), axis=-2)
evocodebench_data_253
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for shooting and rendering rays.""" import jax import jax.numpy as jnp import jax.scipy as jsp from internal import math from internal import stepfun def lift_gaussian(d, t_mean, t_var, r_var, diag): """Lift a Gaussian defined along a ray to 3D coordinates.""" mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None] d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True)) if diag: d_outer_diag = d**2 null_outer_diag = 1 - d_outer_diag / d_mag_sq t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :] xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :] cov_diag = t_cov_diag + xy_cov_diag return mean, cov_diag else: d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :] eye = jnp.eye(d.shape[-1]) null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :] t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :] xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :] cov = t_cov + xy_cov return mean, cov def gaussianize_frustum(t0, t1): """Convert intervals along a conical frustum into means and variances.""" # A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415. 
s = t0 + t1 d = t1 - t0 eps = jnp.finfo(jnp.float32).eps ** 2 ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2) t_mean = s * (1 / 2 + ratio) t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2) r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio) return t_mean, t_var, r_var def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag): """Approximate a 3D conical frustum as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and base_radius is the radius at dist=1. Doesn't assume `d` is normalized. Args: d: jnp.float32 3-vector, the axis of the cone t0: float, the starting distance of the frustum. t1: float, the ending distance of the frustum. base_radius: float, the scale of the radius as a function of distance. diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). """ t_mean, t_var, r_var = gaussianize_frustum(t0, t1) r_var *= base_radius**2 mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag) return mean, cov def cylinder_to_gaussian(d, t0, t1, radius, diag): """Approximate a cylinder as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and radius is the radius. Does not renormalize `d`. Args: d: jnp.float32 3-vector, the axis of the cylinder t0: float, the starting distance of the cylinder. t1: float, the ending distance of the cylinder. radius: float, the radius of the cylinder diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). """ t_mean = (t0 + t1) / 2 r_var = radius**2 / 4 t_var = (t1 - t0) ** 2 / 12 return lift_gaussian(d, t_mean, t_var, r_var, diag) def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True): """Cast rays (cone- or cylinder-shaped) and featurize sections of it. Args: tdist: float array, the "fencepost" distances along the ray. origins: float array, the ray origin coordinates. 
directions: float array, the ray direction vectors. radii: float array, the radii (base radii for cones) of the rays. ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'. diag: boolean, whether or not the covariance matrices should be diagonal. Returns: a tuple of arrays of means and covariances. """ t0 = tdist[Ellipsis, :-1] t1 = tdist[Ellipsis, 1:] if ray_shape == 'cone': gaussian_fn = conical_frustum_to_gaussian elif ray_shape == 'cylinder': gaussian_fn = cylinder_to_gaussian else: raise ValueError("ray_shape must be 'cone' or 'cylinder'") means, covs = gaussian_fn(directions, t0, t1, radii, diag) means = means + origins[Ellipsis, None, :] return means, covs def compute_alpha_weights_helper(density_delta): """Helper function for compute_alpha_weights.""" log_trans = -jnp.concatenate( [ jnp.zeros_like(density_delta[Ellipsis, :1]), jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1), ], axis=-1, ) alpha = 1 - jnp.exp(-density_delta) trans = jnp.exp(log_trans) weights = alpha * trans return weights def compute_alpha_weights( density, tdist, dirs, **kwargs, ): """Helper function for computing alpha compositing weights.""" t_delta = jnp.diff(tdist) delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1) density_delta = density * delta return compute_alpha_weights_helper(density_delta, **kwargs) def volumetric_rendering( rgbs, weights, tdist, bg_rgbs, compute_extras, extras=None, percentiles = (5, 50, 95), ): """Volumetric Rendering Function. Args: rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3] weights: jnp.ndarray(float32), weights, [batch_size, num_samples]. tdist: jnp.ndarray(float32), [batch_size, num_samples]. bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background. compute_extras: bool, if True, compute extra quantities besides color. extras: dict, a set of values along rays to render by alpha compositing. percentiles: depth will be returned for these percentiles. 
Returns: rendering: a dict containing an rgb image of size [batch_size, 3], and other visualizations if compute_extras=True. """ eps = jnp.finfo(jnp.float32).eps rendering = {} acc = weights.sum(axis=-1) bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None]) # The weight of the background. if rgbs is not None: rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs else: rgb = None rendering['rgb'] = rgb if compute_extras: rendering['acc'] = acc if extras is not None: for k, v in extras.items(): if v is not None: rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2) expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc) t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:]) # For numerical stability this expectation is computing using log-distance. rendering['distance_mean'] = jnp.clip( jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf), tdist[Ellipsis, 0], tdist[Ellipsis, -1], ) # Normalize the weights to sum to 1. weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None]) distance_percentiles = stepfun.weighted_percentile( tdist, weights_norm, percentiles ) for i, p in enumerate(percentiles): s = 'median' if p == 50 else 'percentile_' + str(p) rendering['distance_' + s] = distance_percentiles[Ellipsis, i] return rendering
evocodebench_data_254
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for shooting and rendering rays.""" import jax import jax.numpy as jnp import jax.scipy as jsp from internal import math from internal import stepfun def lift_gaussian(d, t_mean, t_var, r_var, diag): """Lift a Gaussian defined along a ray to 3D coordinates.""" mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None] d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True)) if diag: d_outer_diag = d**2 null_outer_diag = 1 - d_outer_diag / d_mag_sq t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :] xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :] cov_diag = t_cov_diag + xy_cov_diag return mean, cov_diag else: d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :] eye = jnp.eye(d.shape[-1]) null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :] t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :] xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :] cov = t_cov + xy_cov return mean, cov def gaussianize_frustum(t0, t1): """Convert intervals along a conical frustum into means and variances.""" # A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415. 
s = t0 + t1 d = t1 - t0 eps = jnp.finfo(jnp.float32).eps ** 2 ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2) t_mean = s * (1 / 2 + ratio) t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2) r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio) return t_mean, t_var, r_var def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag): """Approximate a 3D conical frustum as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and base_radius is the radius at dist=1. Doesn't assume `d` is normalized. Args: d: jnp.float32 3-vector, the axis of the cone t0: float, the starting distance of the frustum. t1: float, the ending distance of the frustum. base_radius: float, the scale of the radius as a function of distance. diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). """ t_mean, t_var, r_var = gaussianize_frustum(t0, t1) r_var *= base_radius**2 mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag) return mean, cov def cylinder_to_gaussian(d, t0, t1, radius, diag): """Approximate a cylinder as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and radius is the radius. Does not renormalize `d`. Args: d: jnp.float32 3-vector, the axis of the cylinder t0: float, the starting distance of the cylinder. t1: float, the ending distance of the cylinder. radius: float, the radius of the cylinder diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). """ t_mean = (t0 + t1) / 2 r_var = radius**2 / 4 t_var = (t1 - t0) ** 2 / 12 return lift_gaussian(d, t_mean, t_var, r_var, diag) def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True): """Cast rays (cone- or cylinder-shaped) and featurize sections of it. Args: tdist: float array, the "fencepost" distances along the ray. origins: float array, the ray origin coordinates. 
directions: float array, the ray direction vectors. radii: float array, the radii (base radii for cones) of the rays. ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'. diag: boolean, whether or not the covariance matrices should be diagonal. Returns: a tuple of arrays of means and covariances. """ t0 = tdist[Ellipsis, :-1] t1 = tdist[Ellipsis, 1:] if ray_shape == 'cone': gaussian_fn = conical_frustum_to_gaussian elif ray_shape == 'cylinder': gaussian_fn = cylinder_to_gaussian else: raise ValueError("ray_shape must be 'cone' or 'cylinder'") means, covs = gaussian_fn(directions, t0, t1, radii, diag) means = means + origins[Ellipsis, None, :] return means, covs def compute_alpha_weights_helper(density_delta): """Helper function for compute_alpha_weights.""" log_trans = -jnp.concatenate( [ jnp.zeros_like(density_delta[Ellipsis, :1]), jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1), ], axis=-1, ) alpha = 1 - jnp.exp(-density_delta) trans = jnp.exp(log_trans) weights = alpha * trans return weights def compute_alpha_weights( density, tdist, dirs, **kwargs, ): """Helper function for computing alpha compositing weights.""" t_delta = jnp.diff(tdist) delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1) density_delta = density * delta return compute_alpha_weights_helper(density_delta, **kwargs) def volumetric_rendering( rgbs, weights, tdist, bg_rgbs, compute_extras, extras=None, percentiles = (5, 50, 95), ): """Volumetric Rendering Function. Args: rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3] weights: jnp.ndarray(float32), weights, [batch_size, num_samples]. tdist: jnp.ndarray(float32), [batch_size, num_samples]. bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background. compute_extras: bool, if True, compute extra quantities besides color. extras: dict, a set of values along rays to render by alpha compositing. percentiles: depth will be returned for these percentiles. 
Returns: rendering: a dict containing an rgb image of size [batch_size, 3], and other visualizations if compute_extras=True. """ eps = jnp.finfo(jnp.float32).eps rendering = {} acc = weights.sum(axis=-1) bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None]) # The weight of the background. if rgbs is not None: rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs else: rgb = None rendering['rgb'] = rgb if compute_extras: rendering['acc'] = acc if extras is not None: for k, v in extras.items(): if v is not None: rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2) expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc) t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:]) # For numerical stability this expectation is computing using log-distance. rendering['distance_mean'] = jnp.clip( jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf), tdist[Ellipsis, 0], tdist[Ellipsis, -1], ) # Normalize the weights to sum to 1. weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None]) distance_percentiles = stepfun.weighted_percentile( tdist, weights_norm, percentiles ) for i, p in enumerate(percentiles): s = 'median' if p == 50 else 'percentile_' + str(p) rendering['distance_' + s] = distance_percentiles[Ellipsis, i] return rendering
evocodebench_data_255
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Camera pose and ray generation utility functions.""" import enum import functools import types from typing import Final, List, Mapping, Optional, Text, Tuple, TypeAlias from absl import logging import chex from internal import configs from internal import geometry from internal import math from internal import rigid_body from internal import spin_math from internal import stepfun from internal import utils import jax from jax import random import jax.numpy as jnp import jaxcam import numpy as np import scipy _Array: TypeAlias = np.ndarray | jnp.ndarray _ScalarArray: TypeAlias = float | _Array _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD: Final[float] = 0.95 def convert_to_ndc( origins, directions, pixtocam, near = 1.0, xnp = np, ): """Converts a set of rays to normalized device coordinates (NDC). Args: origins: ndarray(float32), [..., 3], world space ray origins. directions: ndarray(float32), [..., 3], world space ray directions. pixtocam: ndarray(float32), [3, 3], inverse intrinsic matrix. near: float, near plane along the negative z axis. xnp: either numpy or jax.numpy. Returns: origins_ndc: ndarray(float32), [..., 3]. directions_ndc: ndarray(float32), [..., 3]. 
This function assumes input rays should be mapped into the NDC space for a perspective projection pinhole camera, with identity extrinsic matrix (pose) and intrinsic parameters defined by inputs focal, width, and height. The near value specifies the near plane of the frustum, and the far plane is assumed to be infinity. The ray bundle for the identity pose camera will be remapped to parallel rays within the (-1, -1, -1) to (1, 1, 1) cube. Any other ray in the original world space can be remapped as long as it has dz < 0 (ray direction has a negative z-coord); this allows us to share a common NDC space for "forward facing" scenes. Note that projection(origins + t * directions) will NOT be equal to origins_ndc + t * directions_ndc and that the directions_ndc are not unit length. Rather, directions_ndc is defined such that the valid near and far planes in NDC will be 0 and 1. See Appendix C in https://arxiv.org/abs/2003.08934 for additional details. """ # Shift ray origins to near plane, such that oz = -near. # This makes the new near bound equal to 0. t = -(near + origins[Ellipsis, 2]) / directions[Ellipsis, 2] origins = origins + t[Ellipsis, None] * directions dx, dy, dz = xnp.moveaxis(directions, -1, 0) ox, oy, oz = xnp.moveaxis(origins, -1, 0) xmult = 1.0 / pixtocam[0, 2] # Equal to -2. * focal / cx ymult = 1.0 / pixtocam[1, 2] # Equal to -2. 
* focal / cy # Perspective projection into NDC for the t = 0 near points # origins + 0 * directions origins_ndc = xnp.stack( [xmult * ox / oz, ymult * oy / oz, -xnp.ones_like(oz)], axis=-1 ) # Perspective projection into NDC for the t = infinity far points # origins + infinity * directions infinity_ndc = xnp.stack( [xmult * dx / dz, ymult * dy / dz, xnp.ones_like(oz)], axis=-1 ) # directions_ndc points from origins_ndc to infinity_ndc directions_ndc = infinity_ndc - origins_ndc return origins_ndc, directions_ndc def pad_poses(p): """Pad [..., 3, 4] pose matrices with a homogeneous bottom row [0,0,0,1].""" bottom = np.broadcast_to([0, 0, 0, 1.0], p[Ellipsis, :1, :4].shape) return np.concatenate([p[Ellipsis, :3, :4], bottom], axis=-2) def unpad_poses(p): """Remove the homogeneous bottom row from [..., 4, 4] pose matrices.""" return p[Ellipsis, :3, :4] def recenter_poses(poses): """Recenter poses around the origin.""" cam2world = average_pose(poses) transform = np.linalg.inv(pad_poses(cam2world)) poses = transform @ pad_poses(poses) return unpad_poses(poses), transform def average_pose(poses, lock_up = False): """New pose using average position, z-axis, and up vector of input poses.""" position = poses[:, :3, 3].mean(0) z_axis = poses[:, :3, 2].mean(0) up = poses[:, :3, 1].mean(0) cam2world = viewmatrix(z_axis, up, position, lock_up=lock_up) return cam2world def viewmatrix( lookdir, up, position, lock_up = False, ): """Construct lookat view matrix.""" orthogonal_dir = lambda a, b: normalize(np.cross(a, b)) vecs = [None, normalize(up), normalize(lookdir)] # x-axis is always the normalized cross product of `lookdir` and `up`. vecs[0] = orthogonal_dir(vecs[1], vecs[2]) # Default is to lock `lookdir` vector, if lock_up is True lock `up` instead. ax = 2 if lock_up else 1 # Set the not-locked axis to be orthogonal to the other two. 
vecs[ax] = orthogonal_dir(vecs[(ax + 1) % 3], vecs[(ax + 2) % 3]) m = np.stack(vecs + [position], axis=1) return m def rotation_about_axis(degrees, axis=0): """Creates rotation matrix about one of the coordinate axes.""" radians = degrees / 180.0 * np.pi rot2x2 = np.array( [[np.cos(radians), -np.sin(radians)], [np.sin(radians), np.cos(radians)]] ) r = np.eye(3) r[1:3, 1:3] = rot2x2 r = np.roll(np.roll(r, axis, axis=0), axis, axis=1) p = np.eye(4) p[:3, :3] = r return p def normalize(x): """Normalization helper function.""" return x / np.linalg.norm(x) def focus_point_fn(poses, xnp = np): """Calculate nearest point to all focal axes in poses.""" directions, origins = poses[:, :3, 2:3], poses[:, :3, 3:4] m = xnp.eye(3) - directions * xnp.transpose(directions, [0, 2, 1]) mt_m = xnp.transpose(m, [0, 2, 1]) @ m focus_pt = xnp.linalg.inv(mt_m.mean(0)) @ (mt_m @ origins).mean(0)[:, 0] return focus_pt # Constants for generate_spiral_path(): NEAR_STRETCH = 0.9 # Push forward near bound for forward facing render path. FAR_STRETCH = 5.0 # Push back far bound for forward facing render path. FOCUS_DISTANCE = 0.75 # Relative weighting of near, far bounds for render path. def generate_spiral_path( poses, bounds, n_frames = 120, n_rots = 2, zrate = 0.5, ): """Calculates a forward facing spiral path for rendering.""" # Find a reasonable 'focus depth' for this dataset as a weighted average # of conservative near and far bounds in disparity space. near_bound = bounds.min() * NEAR_STRETCH far_bound = bounds.max() * FAR_STRETCH # All cameras will point towards the world space point (0, 0, -focal). focal = 1 / (((1 - FOCUS_DISTANCE) / near_bound + FOCUS_DISTANCE / far_bound)) # Get radii for spiral path using 90th percentile of camera positions. positions = poses[:, :3, 3] radii = np.percentile(np.abs(positions), 90, 0) radii = np.concatenate([radii, [1.0]]) # Generate poses for spiral path. 
render_poses = [] cam2world = average_pose(poses) up = poses[:, :3, 1].mean(0) for theta in np.linspace(0.0, 2.0 * np.pi * n_rots, n_frames, endpoint=False): t = radii * [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0] position = cam2world @ t lookat = cam2world @ [0, 0, -focal, 1.0] z_axis = position - lookat render_poses.append(viewmatrix(z_axis, up, position)) render_poses = np.stack(render_poses, axis=0) return render_poses def transform_poses_pca(poses): """Transforms poses so principal components lie on XYZ axes. Args: poses: a (N, 3, 4) array containing the cameras' camera to world transforms. Returns: A tuple (poses, transform), with the transformed poses and the applied camera_to_world transforms. """ t = poses[:, :3, 3] t_mean = t.mean(axis=0) t = t - t_mean eigval, eigvec = np.linalg.eig(t.T @ t) # Sort eigenvectors in order of largest to smallest eigenvalue. inds = np.argsort(eigval)[::-1] eigvec = eigvec[:, inds] rot = eigvec.T if np.linalg.det(rot) < 0: rot = np.diag(np.array([1, 1, -1])) @ rot transform = np.concatenate([rot, rot @ -t_mean[:, None]], -1) poses_recentered = unpad_poses(transform @ pad_poses(poses)) transform = np.concatenate([transform, np.eye(4)[3:]], axis=0) # Flip coordinate system if z component of y-axis is negative if poses_recentered.mean(axis=0)[2, 1] < 0: poses_recentered = np.diag(np.array([1, -1, -1])) @ poses_recentered transform = np.diag(np.array([1, -1, -1, 1])) @ transform # Just make sure it's it in the [-1, 1]^3 cube scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3])) poses_recentered[:, :3, 3] *= scale_factor transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform return poses_recentered, transform def transform_poses_focus(poses): """Transforms poses so that the "focus point" of capture is at the origin. Args: poses: a (N, 3, 4) array containing the cameras' camera to world transforms. 
Returns: A tuple (poses, transform), with the transformed poses and the applied camera_to_world transforms. """ # Move the focus point to the origin. focus_point = focus_point_fn(poses) # Use average up vector as the Z axis. swap_y_z = np.array([ [1, 0, 0], [0, 0, 1], [0, -1, 0.0], ]) rot = average_pose(poses, lock_up=True)[:3, :3] @ swap_y_z transform = np.concatenate([rot.T, rot.T @ -focus_point[:, None]], -1) poses_recentered = transform @ pad_poses(poses) transform = np.concatenate([transform, np.eye(4)[3:]], axis=0) # Just make sure it's it in the [-1, 1]^3 cube scale_factor = 1.0 / np.max(np.abs(poses_recentered[:, :3, 3])) poses_recentered[:, :3, 3] *= scale_factor transform = np.diag(np.array([scale_factor] * 3 + [1])) @ transform return poses_recentered, transform def generate_ellipse_path( poses, n_frames = 120, const_speed = True, z_variation = 0.0, z_phase = 0.0, rad_mult_min = 1.0, rad_mult_max = 1.0, render_rotate_xaxis = 0.0, render_rotate_yaxis = 0.0, use_avg_z_height = False, z_height_percentile = None, lock_up = False, ): """Generate an elliptical render path based on the given poses.""" # Calculate the focal point for the path (cameras point toward this). center = focus_point_fn(poses) # Default path height sits at z=0 (in middle of zero-mean capture pattern). xy_offset = center[:2] # Calculate lengths for ellipse axes based on input camera positions. xy_radii = np.percentile(np.abs(poses[:, :2, 3] - xy_offset), 90, axis=0) # Use ellipse that is symmetric about the focal point in xy. xy_low = xy_offset - xy_radii xy_high = xy_offset + xy_radii # Optional height variation, need not be symmetric. z_min = np.percentile((poses[:, 2, 3]), 10, axis=0) z_max = np.percentile((poses[:, 2, 3]), 90, axis=0) if use_avg_z_height or z_height_percentile is not None: # Center the path vertically around the average camera height, good for # datasets recentered by transform_poses_focus function. 
    # NOTE(review): this is the tail of an elliptical render-path generator
    # whose `def` lies above this chunk; `poses`, `z_variation`, `z_min`,
    # `z_max`, `xy_low`, `xy_high`, `center`, `rad_mult_min/max`, etc. are its
    # locals/arguments — confirm against the full function.
    if z_height_percentile is None:
      # Default: use the mean camera height as the path's base altitude.
      z_init = poses[:, 2, 3].mean(axis=0)
    else:
      z_init = np.percentile(poses[:, 2, 3], z_height_percentile, axis=0)
  else:
    # Center the path at zero, good for datasets recentered by
    # transform_poses_pca function.
    z_init = 0
  # Height bounds scaled by z_variation around the base altitude.
  z_low = z_init + z_variation * (z_min - z_init)
  z_high = z_init + z_variation * (z_max - z_init)
  xyz_low = np.array([*xy_low, z_low])
  xyz_high = np.array([*xy_high, z_high])

  def get_positions(theta):
    # Interpolate between bounds with trig functions to get ellipse in x-y.
    # Optionally also interpolate in z to change camera height along path.
    t_x = np.cos(theta) * 0.5 + 0.5
    t_y = np.sin(theta) * 0.5 + 0.5
    t_z = np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5
    t_xyz = np.stack([t_x, t_y, t_z], axis=-1)
    positions = xyz_low + t_xyz * (xyz_high - xyz_low)
    # Interpolate between min and max radius multipliers so the camera zooms in
    # and out of the scene center.
    t = np.sin(theta) * 0.5 + 0.5
    rad_mult = rad_mult_min + (rad_mult_max - rad_mult_min) * t
    positions = center + (positions - center) * rad_mult[:, None]
    return positions

  theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
  positions = get_positions(theta)

  if const_speed:
    # Resample theta angles so that the velocity is closer to constant.
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
    positions = get_positions(theta)

  # Throw away duplicated last position (theta=0 and theta=2*pi coincide).
  positions = positions[:-1]

  # Set path's up vector to axis closest to average of input pose up vectors.
  avg_up = poses[:, :3, 1].mean(0)
  avg_up = avg_up / np.linalg.norm(avg_up)
  ind_up = np.argmax(np.abs(avg_up))
  up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])

  poses = np.stack([viewmatrix(p - center, up, p, lock_up) for p in positions])

  # Optional global tilt of the whole path around the y then x axes.
  poses = poses @ rotation_about_axis(-render_rotate_yaxis, axis=1)
  poses = poses @ rotation_about_axis(render_rotate_xaxis, axis=0)

  return poses


def generate_interpolated_path(
    poses,
    n_interp,
    spline_degree = 5,
    smoothness = 0.03,
    rot_weight = 0.1,
    lock_up = False,
    fixed_up_vector = None,
    lookahead_i = None,
    frames_per_colmap = None,
    const_speed = False,
    n_buffer = None,
    periodic = False,
    n_interp_as_total = False,
):
  """Creates a smooth spline path between input keyframe camera poses.

  Spline is calculated with poses in format (position, lookat-point, up-point).

  Args:
    poses: (n, 3, 4) array of input pose keyframes.
    n_interp: returned path will have n_interp * (n - 1) total poses.
    spline_degree: polynomial degree of B-spline.
    smoothness: parameter for spline smoothing, 0 forces exact interpolation.
    rot_weight: relative weighting of rotation/translation in spline solve.
    lock_up: if True, forced to use given Up and allow Lookat to vary.
    fixed_up_vector: replace the interpolated `up` with a fixed vector.
    lookahead_i: force the look direction to look at the pose `i` frames ahead.
    frames_per_colmap: conversion factor for the desired average velocity.
    const_speed: renormalize spline to have constant delta between each pose.
    n_buffer: Number of buffer frames to insert at the start and end of the
      path. Helps keep the ends of a spline path straight.
    periodic: make the spline path periodic (perfect loop).
    n_interp_as_total: use n_interp as total number of poses in path rather
      than the number of poses to interpolate between each input.

  Returns:
    Array of new camera poses with shape (n_interp * (n - 1), 3, 4), or
    (n_interp, 3, 4) if n_interp_as_total is set.
  """

  def poses_to_points(poses, dist):
    """Converts from pose matrices to (position, lookat, up) format."""
    pos = poses[:, :3, -1]
    # Lookat/up points are offset `dist` along the -z (forward) and +y (up)
    # camera axes so that rotation is splined jointly with translation.
    lookat = poses[:, :3, -1] - dist * poses[:, :3, 2]
    up = poses[:, :3, -1] + dist * poses[:, :3, 1]
    return np.stack([pos, lookat, up], 1)

  def points_to_poses(points):
    """Converts from (position, lookat, up) format to pose matrices."""
    poses = []
    for i in range(len(points)):
      pos, lookat_point, up_point = points[i]
      if lookahead_i is not None:
        # NOTE(review): when lookahead runs off the end of the path, `lookat`
        # from the previous iteration is silently reused (and iteration 0
        # would raise if it were ever out of range) — confirm intended.
        if i + lookahead_i < len(points):
          lookat = pos - points[i + lookahead_i][0]
      else:
        lookat = pos - lookat_point
      up = (up_point - pos) if fixed_up_vector is None else fixed_up_vector
      poses.append(viewmatrix(lookat, up, pos, lock_up=lock_up))
    return np.array(poses)

  def insert_buffer_poses(poses, n_buffer):
    """Insert extra poses at the start and end of the path."""

    def average_distance(points):
      distances = np.linalg.norm(points[1:] - points[0:-1], axis=-1)
      return np.mean(distances)

    def shift(pose, dz):
      result = np.copy(pose)
      z = result[:3, 2]
      z /= np.linalg.norm(z)
      # Move along forward-backward axis. -z is forward.
      result[:3, 3] += z * dz
      return result

    dz = average_distance(poses[:, :3, 3])
    prefix = np.stack([shift(poses[0], (i + 1) * dz) for i in range(n_buffer)])
    prefix = prefix[::-1]  # reverse order
    suffix = np.stack(
        [shift(poses[-1], -(i + 1) * dz) for i in range(n_buffer)]
    )
    result = np.concatenate([prefix, poses, suffix])
    return result

  def remove_buffer_poses(poses, u, n_frames, u_keyframes, n_buffer):
    # Drop the spline samples that fall inside the buffer segments.
    u_keyframes = u_keyframes[n_buffer:-n_buffer]
    mask = (u >= u_keyframes[0]) & (u <= u_keyframes[-1])
    poses = poses[mask]
    u = u[mask]
    n_frames = len(poses)
    return poses, u, n_frames, u_keyframes

  def interp(points, u, k, s):
    """Runs multidimensional B-spline interpolation on the input points."""
    sh = points.shape
    pts = np.reshape(points, (sh[0], -1))
    # splprep requires k <= number of points - 1.
    k = min(k, sh[0] - 1)
    tck, u_keyframes = scipy.interpolate.splprep(pts.T, k=k, s=s, per=periodic)
    new_points = np.array(scipy.interpolate.splev(u, tck))
    new_points = np.reshape(new_points.T, (len(u), sh[1], sh[2]))
    return new_points, u_keyframes

  if n_buffer is not None:
    poses = insert_buffer_poses(poses, n_buffer)
  points = poses_to_points(poses, dist=rot_weight)
  if n_interp_as_total:
    n_frames = n_interp + 1  # Add extra since final pose is discarded.
  else:
    n_frames = n_interp * (points.shape[0] - 1)
  u = np.linspace(0, 1, n_frames, endpoint=True)
  new_points, u_keyframes = interp(points, u=u, k=spline_degree, s=smoothness)
  poses = points_to_poses(new_points)
  if n_buffer is not None:
    poses, u, n_frames, u_keyframes = remove_buffer_poses(
        poses, u, n_frames, u_keyframes, n_buffer
    )

  if frames_per_colmap is not None:
    # Recalculate the number of frames to achieve desired average velocity.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    total_length_colmap = lengths.sum()
    print('old n_frames:', n_frames)
    print('total_length_colmap:', total_length_colmap)
    n_frames = int(total_length_colmap * frames_per_colmap)
    print('new n_frames:', n_frames)
    u = np.linspace(
        np.min(u_keyframes), np.max(u_keyframes), n_frames, endpoint=True
    )
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)

  if const_speed:
    # Resample timesteps so that the velocity is nearly constant.
    positions = poses[:, :3, -1]
    lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
    u = stepfun.sample(None, u, np.log(lengths), n_frames + 1)
    new_points, _ = interp(points, u=u, k=spline_degree, s=smoothness)
    poses = points_to_poses(new_points)

  # Throw away the duplicated final pose.
  return poses[:-1], u[:-1], u_keyframes


def safe_interpolate_1d(
    x,
    spline_degree,
    smoothness,
    t_input,
    t_output,
):
  """Interpolate 1d signal x (defined at t_input and queried at t_output)."""
  # TODO(bmild): switch interpolation t values to match those chosen for path.
  # One needs at least n=k+1 points to fit a polynomial of degree k to n points.
  n = len(x)
  spline_degree = min(spline_degree, n - 1)

  if spline_degree > 0:
    tck = scipy.interpolate.splrep(t_input, x, s=smoothness, k=spline_degree)
    return scipy.interpolate.splev(t_output, tck).astype(x.dtype)
  else:
    # n = 0 or 1: nothing to fit, return a constant signal.
    fill_value = x[0] if n else 0.0
    return np.full(t_output.shape, fill_value, dtype=x.dtype)


def identify_file_names(dir_or_text_file):
  """Load filenames from text file or directory."""
  if utils.isdir(dir_or_text_file):
    # If `dir_or_text_file` is a directory, grab the filenames.
    subset_names = sorted(utils.listdir(dir_or_text_file))
  else:
    # If `dir_or_text_file` is a text file, treat each line as a filename.
    with utils.open_file(dir_or_text_file, 'r') as fp:
      names = fp.read()
      if isinstance(names, bytes):
        names = names.decode('utf-8')
      # Decode bytes into string and split into lines.
      subset_names = names.splitlines()
  return subset_names


def identify_file_indices(
    dir_or_text_file, file_names
):
  """Computes indices for a subset of files out of a larger list."""
  # Load file names.
  subset_names = identify_file_names(dir_or_text_file)

  # COLMAP sometimes doesn't reconstruct all images, which results in some
  # files being missing.
  if not set(subset_names).issubset(file_names):
    subset_names_missing_from_file_names = set(subset_names) - set(file_names)
    logging.warning(
        'Some files from subset are missing in the file names:\n%s',
        ' '.join(str(x) for x in subset_names_missing_from_file_names),
    )
    missing_subset_names_threshold = len(
        subset_names_missing_from_file_names
    ) / len(subset_names)
    if (
        missing_subset_names_threshold
        > _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
    ):
      raise ValueError(
          f'{missing_subset_names_threshold*100}% of subset files is missing'
          f' from file_names: {subset_names_missing_from_file_names}'
      )

  file_names_set = set(file_names)

  # Get indices corresponding to the subset filenames. Ensure that the order
  # used in subset_names is preserved.
  indices = [file_names.index(n) for n in subset_names if n in file_names_set]
  indices = np.array(indices)

  return indices


def get_meters_per_colmap_from_calibration_images(
    config, poses, image_names
):
  """Uses calibration images to get how many meters is a single COLMAP unit."""
  # By default, the input camera poses are scaled to fit in the [-1, 1]^3 cube.
  # This default value implies a scaling of 2 / .25 = 8 meters between the
  # farthest apart camera poses.
  meters_per_colmap = 8.0
  if config.render_calibration_keyframes is not None:
    # Use provided calibration keyframes to determine metric world scale.
    calib_names = identify_file_names(config.render_calibration_keyframes)
    indices = []
    for i in range(0, len(calib_names), 2):
      # Grab pairs of calibration images filenames.
      name0, name1 = calib_names[i : i + 2]
      # Check if both are in the set of colmap-posed images.
      if name0 in image_names and name1 in image_names:
        indices.append((image_names.index(name0), image_names.index(name1)))
    if indices:
      # Extract colmap-space positions from the camera pose matrices.
      positions = poses[indices][Ellipsis, :3, -1]
      # Every pair of calibration keyframes should have world space distance
      # `render_calibration_distance` according to the capture handbook.
      colmap_lengths = np.linalg.norm(
          positions[:, 0] - positions[:, 1], axis=-1
      )
      colmap_length = colmap_lengths.mean(axis=0)
      # Ratio of world distance to colmap distance.
      meters_per_colmap = config.render_calibration_distance / colmap_length
      print('colmap lengths', colmap_lengths)
      print('avg', colmap_length)
      print('meters_per_colmap', meters_per_colmap)

  return meters_per_colmap


def calibrate_spline_speed(
    config, poses, image_names
):
  """Uses input config to determine a conversion factor for the spline speed."""
  if config.render_spline_meters_per_sec is None:
    return None

  meters_per_colmap = get_meters_per_colmap_from_calibration_images(
      config, poses, image_names
  )
  meters_per_sec = config.render_spline_meters_per_sec
  frames_per_sec = config.render_video_fps
  # frames/colmap-unit = (m/colmap-unit) / (m/s) * (frames/s).
  frames_per_colmap = meters_per_colmap / meters_per_sec * frames_per_sec
  print('returning frames_per_colmap', frames_per_colmap)

  return frames_per_colmap


def create_render_spline_path(
    config,
    image_names,
    poses,
    exposures,
):
  """Creates spline interpolation render path from subset of dataset poses.

  Args:
    config: configs.Config object.
    image_names: a list of image filenames.
    poses: [N, 3, 4] array of extrinsic camera pose matrices.
    exposures: optional list of floating point exposure values.

  Returns:
    spline_indices: list of indices used to select spline keyframe poses.
    render_poses: array of interpolated extrinsic camera poses for the path.
    render_exposures: optional list of interpolated exposures for the path.
  """

  def remove_outlier_spline_indices(
      spline_indices, poses, q_max, q_mult
  ):
    """Identify spline indices correspond to inlier poses."""
    poses = poses[spline_indices]
    points = poses[:, :3, -1]
    # Distance between consecutive keyframes; a jump much larger than the
    # chosen quantile marks the *earlier* keyframe as an outlier.
    distances = np.linalg.norm(points[1:] - points[:-1], axis=-1)
    mask = distances < q_mult * np.quantile(distances, q_max)
    mask = np.concatenate([mask, [True]], axis=0)  # Keep the last pose.
    num_inliers = int(np.sum(mask))
    num_total = len(spline_indices)
    print(
        f'remove_outlier_spline_indices: {num_inliers}/{num_total} spline '
        'path poses remaining after outlier removal.'
    )
    return spline_indices[mask]

  # Grab poses corresponding to the image filenames.
  spline_indices = identify_file_indices(
      config.render_spline_keyframes, image_names
  )
  if (
      config.render_spline_outlier_keyframe_quantile is not None
      and config.render_spline_outlier_keyframe_multiplier is not None
  ):
    spline_indices = remove_outlier_spline_indices(
        spline_indices,
        poses,
        q_max=config.render_spline_outlier_keyframe_quantile,
        q_mult=config.render_spline_outlier_keyframe_multiplier,
    )
  keyframes = poses[spline_indices]

  frames_per_colmap = calibrate_spline_speed(config, poses, image_names)

  if config.render_spline_fixed_up:
    # Fix path to use world-space "up" vector instead of "banking" with spline.
    all_up_vectors = poses[:, :3, 1]  # second column of pose matrix is up.
    fixed_up_vector = normalize(all_up_vectors.mean(axis=0))
  else:
    fixed_up_vector = None
  render_poses, frame_timesteps, keyframe_timesteps = (
      generate_interpolated_path(
          keyframes,
          n_interp=config.render_spline_n_interp,
          spline_degree=config.render_spline_degree,
          smoothness=config.render_spline_smoothness,
          rot_weight=config.render_spline_rot_weight,
          lock_up=config.render_spline_lock_up,
          fixed_up_vector=fixed_up_vector,
          lookahead_i=config.render_spline_lookahead_i,
          frames_per_colmap=frames_per_colmap,
          const_speed=config.render_spline_const_speed,
          n_buffer=config.render_spline_n_buffer,
      )
  )
  if config.render_spline_interpolate_exposure:
    if exposures is None:
      raise ValueError(
          'config.render_spline_interpolate_exposure is True but '
          'create_render_spline_path() was passed exposures=None.'
      )
    # Interpolate per-frame exposure value. Interpolation is done in log
    # space so it is multiplicative in exposure.
    log_exposure = np.log(exposures[spline_indices])
    # Use aggressive smoothing for exposure interpolation to avoid flickering.
    log_exposure_interp = safe_interpolate_1d(
        log_exposure,
        spline_degree=5,
        smoothness=config.render_spline_interpolate_exposure_smoothness,
        t_input=keyframe_timesteps,
        t_output=frame_timesteps,
    )
    render_exposures = np.exp(log_exposure_interp)
  else:
    render_exposures = None
  return spline_indices, render_poses, render_exposures


def intrinsic_matrix(
    fx,
    fy,
    cx,
    cy,
    xnp=np,
):
  """Intrinsic matrix for a pinhole camera in OpenCV coordinate system."""
  return xnp.array([
      [fx, 0, cx],
      [0, fy, cy],
      [0, 0, 1.0],
  ])


def get_pixtocam(
    focal,
    width,
    height,
    xnp=np,
):
  """Inverse intrinsic matrix for a perfect pinhole camera."""
  # Principal point is assumed to be at the image center.
  camtopix = intrinsic_matrix(focal, focal, width * 0.5, height * 0.5, xnp)
  return xnp.linalg.inv(camtopix)


def pixel_coordinates(
    width, height, xnp=np
):
  """Tuple of the x and y integer coordinates for a grid of pixels."""
  return xnp.meshgrid(xnp.arange(width), xnp.arange(height), indexing='xy')


def _radial_and_tangential_distort(
    x,
    y,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
):
  """Computes the
  distorted pixel positions."""
  # Standard OpenCV-style polynomial radial (k1..k4) + tangential (p1, p2)
  # distortion, applied to normalized image-plane coordinates (x, y).
  r2 = x * x + y * y
  radial_distortion = r2 * (k1 + r2 * (k2 + r2 * (k3 + r2 * k4)))
  dx_radial = x * radial_distortion
  dy_radial = y * radial_distortion
  dx_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
  dy_tangential = 2 * p2 * x * y + p1 * (r2 + 2 * y * y)
  return x + dx_radial + dx_tangential, y + dy_radial + dy_tangential


def _compute_residual_and_jacobian(
    x,
    y,
    xd,
    yd,
    k1=0.0,
    k2=0.0,
    k3=0.0,
    k4=0.0,
    p1=0.0,
    p2=0.0,
):
  """Auxiliary function of radial_and_tangential_undistort()."""
  # Adapted from https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # let r(x, y) = x^2 + y^2;
  #     d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
  #               k4 * r(x, y)^4;
  r = x * x + y * y
  d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))

  # The perfect projection is:
  # xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
  # yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
  #
  # Let's define
  #
  # fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
  # fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
  #
  # We are looking for a solution that satisfies
  # fx(x, y) = fy(x, y) = 0;
  fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
  fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd

  # Compute derivative of d over [x, y]
  d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
  d_x = 2.0 * x * d_r
  d_y = 2.0 * y * d_r

  # Compute derivative of fx over x and y.
  fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
  fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y

  # Compute derivative of fy over x and y.
  fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
  fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y

  return fx, fy, fx_x, fx_y, fy_x, fy_y


def _radial_and_tangential_undistort(
    xd,
    yd,
    k1 = 0,
    k2 = 0,
    k3 = 0,
    k4 = 0,
    p1 = 0,
    p2 = 0,
    eps = 1e-9,
    max_iterations=10,
    xnp = np,
):
  """Computes undistorted (x, y) from (xd, yd)."""
  # From https://github.com/google/nerfies/blob/main/nerfies/camera.py
  # Newton iteration on the 2x2 system fx = fy = 0; fixed iteration count
  # (no early exit) keeps the loop jit/vectorization friendly.
  # Initialize from the distorted point.
  x = xnp.copy(xd)
  y = xnp.copy(yd)

  for _ in range(max_iterations):
    fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
        x=x, y=y, xd=xd, yd=yd, k1=k1, k2=k2, k3=k3, k4=k4, p1=p1, p2=p2
    )
    # Cramer's-rule solve of the 2x2 Newton step; skip the update where the
    # Jacobian determinant is (near) singular.
    denominator = fy_x * fx_y - fx_x * fy_y
    x_numerator = fx * fy_y - fy * fx_y
    y_numerator = fy * fx_x - fx * fy_x
    step_x = xnp.where(
        xnp.abs(denominator) > eps,
        x_numerator / denominator,
        xnp.zeros_like(denominator),
    )
    step_y = xnp.where(
        xnp.abs(denominator) > eps,
        y_numerator / denominator,
        xnp.zeros_like(denominator),
    )

    x = x + step_x
    y = y + step_y

  return x, y


class ProjectionType(enum.Enum):
  """Camera projection type (perspective pinhole, fisheye, or 360 pano)."""

  PERSPECTIVE = 'perspective'
  FISHEYE = 'fisheye'
  PANORAMIC = 'pano'


def pixels_to_rays(
    pix_x_int,
    pix_y_int,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates rays given pixel coordinates, intrinisics, and extrinsics.

  Given 2D pixel coordinates pix_x_int, pix_y_int for cameras with inverse
  intrinsics pixtocams and extrinsics camtoworlds (and optional distortion
  coefficients distortion_params and NDC space projection matrix
  pixtocam_ndc), computes the corresponding 3D camera rays.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    pix_x_int: int array, shape SH, x coordinates of image pixels.
    pix_y_int: int array, shape SH, y coordinates of image pixels.
    pixtocams: float array, broadcastable to SH + [3, 3], inverse intrinsics.
    camtoworlds: float array, broadcastable to SH + [3, 4], camera extrinsics.
    distortion_params: dict of floats, optional camera distortion parameters.
    pixtocam_ndc: float array, [3, 3], optional inverse intrinsics for NDC.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    xnp: either numpy or jax.numpy.

  Returns:
    origins: float array, shape SH + [3], ray origin points.
    directions: float array, shape SH + [3], ray direction vectors.
    viewdirs: float array, shape SH + [3], normalized ray direction vectors.
    radii: float array, shape SH + [1], ray differential radii.
    imageplane: float array, shape SH + [2], xy coordinates on the image plane.
      If the image plane is at world space distance 1 from the pinhole, then
      imageplane will be the xy coordinates of a pixel in that space (so the
      camera ray direction at the origin would be (x, y, -1) in OpenGL coords).
  """

  # Must add half pixel offset to shoot rays through pixel centers.
  def pix_to_dir(x, y):
    return xnp.stack([x + 0.5, y + 0.5, xnp.ones_like(x)], axis=-1)

  # We need the dx and dy rays to calculate ray radii for mip-NeRF cones.
  pixel_dirs_stacked = xnp.stack(
      [
          pix_to_dir(pix_x_int, pix_y_int),
          pix_to_dir(pix_x_int + 1, pix_y_int),
          pix_to_dir(pix_x_int, pix_y_int + 1),
      ],
      axis=0,
  )

  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  # Apply inverse intrinsic matrices.
  camera_dirs_stacked = mat_vec_mul(pixtocams, pixel_dirs_stacked)

  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_undistort(
        camera_dirs_stacked[Ellipsis, 0],
        camera_dirs_stacked[Ellipsis, 1],
        **distortion_params,
        xnp=xnp,
    )
    camera_dirs_stacked = xnp.stack([x, y, xnp.ones_like(x)], -1)

  if camtype == ProjectionType.FISHEYE:
    theta = xnp.sqrt(
        xnp.sum(xnp.square(camera_dirs_stacked[Ellipsis, :2]), axis=-1)
    )
    theta = xnp.minimum(xnp.pi, theta)
    # NOTE(review): divides by theta with no epsilon — the exact image-center
    # pixel (theta == 0) would produce NaN/inf here. Confirm whether the
    # half-pixel offset above is relied upon to keep theta nonzero.
    sin_theta_over_theta = xnp.sin(theta) / theta
    camera_dirs_stacked = xnp.stack(
        [
            camera_dirs_stacked[Ellipsis, 0] * sin_theta_over_theta,
            camera_dirs_stacked[Ellipsis, 1] * sin_theta_over_theta,
            xnp.cos(theta),
        ],
        axis=-1,
    )

  elif camtype == ProjectionType.PANORAMIC:
    theta = camera_dirs_stacked[Ellipsis, 0]
    phi = camera_dirs_stacked[Ellipsis, 1]
    # Negation on y and z components accounts for expected OpenCV convention.
    camera_dirs_stacked = xnp.stack(
        [
            -xnp.sin(phi) * xnp.sin(theta),
            -xnp.cos(phi),
            -xnp.sin(phi) * xnp.cos(theta),
        ],
        axis=-1,
    )

  # Flip from OpenCV to OpenGL coordinate system.
  camera_dirs_stacked = matmul(
      camera_dirs_stacked, xnp.diag(xnp.array([1.0, -1.0, -1.0]))
  )

  # Extract 2D image plane (x, y) coordinates.
  imageplane = camera_dirs_stacked[0, Ellipsis, :2]

  # Apply camera rotation matrices.
  directions_stacked = mat_vec_mul(
      camtoworlds[Ellipsis, :3, :3], camera_dirs_stacked
  )
  # Extract the offset rays.
  directions, dx, dy = directions_stacked

  origins = xnp.broadcast_to(camtoworlds[Ellipsis, :3, -1], directions.shape)
  viewdirs = directions / xnp.linalg.norm(directions, axis=-1, keepdims=True)

  if pixtocam_ndc is None:
    # Distance from each unit-norm direction vector to its neighbors.
    dx_norm = xnp.linalg.norm(dx - directions, axis=-1)
    dy_norm = xnp.linalg.norm(dy - directions, axis=-1)
  else:
    # Convert ray origins and directions into projective NDC space.
    ndc_fn = functools.partial(convert_to_ndc, pixtocam=pixtocam_ndc, xnp=xnp)
    origins_dx, _ = ndc_fn(origins, dx)
    origins_dy, _ = ndc_fn(origins, dy)
    origins, directions = ndc_fn(origins, directions)

    # In NDC space, we use the offset between origins instead of directions.
    dx_norm = xnp.linalg.norm(origins_dx - origins, axis=-1)
    dy_norm = xnp.linalg.norm(origins_dy - origins, axis=-1)

  # Cut the distance in half, multiply it to match the variance of a uniform
  # distribution the size of a pixel (1/12, see paper).
  # TODO(barron): Add a unit test that this is correct.
  radii = (0.5 * (dx_norm + dy_norm))[Ellipsis, None] * 2 / xnp.sqrt(12)

  return origins, directions, viewdirs, radii, imageplane


def points_to_pixels(
    points,
    pixtocams,
    camtoworlds,
    distortion_params = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Calculates pixel coordinates given 3D points, intrinisics, and extrinsics.

  Given 3D point coordinates points and cameras with inverse intrinsics
  pixtocams and extrinsics camtoworlds (and optional distortion coefficients
  distortion_params), computes the corresponding 2D pixel coordinates.

  Vectorized over the leading dimensions of the first four arguments.

  Args:
    points: float array, [..., 3], 3D coordinates of points to project.
    pixtocams: float array, [..., 3, 3], inverse intrinsics.
    camtoworlds: float array, [..., 3, 4], camera extrinsics.
    distortion_params: dict of floats or float arrays [...], optional camera
      distortion parameters.
    camtype: camera_utils.ProjectionType, type of camera model.
    xnp: either numpy (host compute) or jax.numpy (device compute).

  Returns:
    coordinates: float array, [..., 2], pixel coordinates.
    depth: float array, [...], per-point orthographic depth.
  """
  if camtype != ProjectionType.PERSPECTIVE:
    raise ValueError(f'points_to_pixels only supports perspective projection, '
                     f'not {camtype} mode.')

  # For jax, need to specify high-precision matmul.
  matmul = math.matmul if xnp == jnp else xnp.matmul
  mat_vec_mul = lambda A, b: matmul(A, b[Ellipsis, None])[Ellipsis, 0]

  rotation = camtoworlds[Ellipsis, :3, :3]
  # Rotation matrices are orthogonal, so the transpose is the inverse.
  rotation_inv = xnp.swapaxes(rotation, -1, -2)
  translation = camtoworlds[Ellipsis, :3, -1]

  # Points (directions) in the camera coordinate frame.
  points_camera = mat_vec_mul(rotation_inv, points - translation)

  # Projection to image plane by dividing out -z.
  depth = -points_camera[Ellipsis, -1]
  camera_dirs = points_camera / depth[Ellipsis, None]

  # OpenGL to OpenCV coordinates.
  camera_dirs = matmul(camera_dirs, xnp.diag(xnp.array([1.0, -1.0, -1.0])))

  if distortion_params is not None:
    # Correct for distortion.
    x, y = _radial_and_tangential_distort(
        camera_dirs[Ellipsis, 0],
        camera_dirs[Ellipsis, 1],
        **distortion_params,
    )
    camera_dirs = xnp.stack([x, y, xnp.ones_like(x)], -1)

  # Apply intrinsics matrix.
  pixel_dirs = mat_vec_mul(xnp.linalg.inv(pixtocams), camera_dirs)

  # Remove half pixel offset (inverse of the offset added in pixels_to_rays).
  coordinates = pixel_dirs[Ellipsis, :2] - xnp.array([0.5, 0.5])

  return coordinates, depth


def rays_planes_intersection(
    z_min,
    z_max,
    origins,
    directions,
    xnp = np,
):
  """Crops rays to a range of z values.

  This is useful for situations where the scene lies within a range of
  altitudes, but the cameras are very far away, as with aerial data.

  Args:
    z_min: float z value of the lower cropping plane.
    z_max: float z value of the upper cropping plane.
    origins: ray origins points.
    directions: ray direction vectors.
    xnp: either numpy or jax.numpy.
  Returns:
    t_min: parametric location of the cropped ray origins
    t_max: parametric location of the ends of the cropped rays
  """
  # Parametric distances to each plane along the ray; t_min is clamped to 0
  # so the cropped segment never starts behind the camera.
  t1 = (z_min - origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  t2 = (z_max - origins[Ellipsis, 2]) / directions[Ellipsis, 2]
  t_min = xnp.maximum(0, xnp.minimum(t1, t2))
  t_max = xnp.maximum(t1, t2)
  return t_min, t_max


def _intersect_ranges(
    r1,
    r2,
    xnp = np,
):
  """Intersects two (start, end) ranges; empty results have start > end."""
  start = xnp.maximum(r1[0], r2[0])
  end = xnp.minimum(r1[1], r2[1])
  return (start, end)


def ray_box_intersection(
    ray_o, ray_d, corners, xnp = np
):
  """Returns enter/exit distances along the ray for box defined by `corners`."""
  # Slab method: intersect per-axis ranges; the entry is the farthest slab
  # entry and the exit the nearest slab exit. Misses yield t_min > t_max.
  t1 = (corners[0] - ray_o) / ray_d
  t2 = (corners[1] - ray_o) / ray_d
  t_min = xnp.minimum(t1, t2).max(axis=-1)
  t_max = xnp.maximum(t1, t2).min(axis=-1)
  return t_min, t_max


def modify_rays_with_bbox(
    rays, corners, xnp = np
):
  """Sets near/far by bbox intersection and multiplies lossmult by mask."""
  lossmult = rays.lossmult
  near = rays.near
  far = rays.far
  t_min, t_max = ray_box_intersection(
      rays.origins, rays.directions, corners, xnp=xnp
  )
  t_min, t_max = t_min[Ellipsis, None], t_max[Ellipsis, None]
  hits = t_min <= t_max
  # Intersect the box span with the rays' existing [near, far] interval.
  inear, ifar = _intersect_ranges((near, far), (t_min, t_max), xnp=xnp)
  overlaps = inear <= ifar
  valid = hits * overlaps
  # Invalid rays get lossmult 0 and a degenerate [0, 0] near/far interval.
  if lossmult is None:
    lossmult = valid.astype(xnp.float32)
  else:
    lossmult = xnp.where(valid, lossmult, 0.0)
  near = xnp.where(valid, inear, 0.0)
  far = xnp.where(valid, ifar, 0.0)
  return rays.replace(lossmult=lossmult, near=near, far=far)


def ray_sphere_intersection(
    ray_o,
    ray_d,
    center,
    radius,
    xnp = np,
):
  """Calculates distance to hit a sphere for a ray.

  Args:
    ray_o: Ray origin (..., 3)
    ray_d: Ray direction (..., 3)
    center: Sphere center (..., 3)
    radius: Sphere radius (..., 1)
    xnp: Numpy or Jax module

  Returns:
    t_min, t_max, hit. When no hit is found, t_min = t_max = 0.
  """
  # Solve the quadratic |o + t*d - c|^2 = r^2 for t.
  oc = ray_o - center
  a = (ray_d**2).sum(axis=-1)
  b = 2 * (oc * ray_d).sum(axis=-1)
  c = (oc * oc).sum(axis=-1) - radius**2
  det = b**2 - 4.0 * a * c
  hit = (det >= 0) * (a > 0)
  # Nb: Results are 'wrong' if valid = false, this is just to make jax
  # not freak out.
  det = xnp.where(hit, det, 0.0)
  a = xnp.where(hit, a, 1.0)
  t_min = xnp.where(hit, (-b - xnp.sqrt(det)) / (2.0 * a), 0.0)
  t_max = xnp.where(hit, (-b + xnp.sqrt(det)) / (2.0 * a), 0.0)
  return t_min, t_max, hit


def gather_cameras(cameras, cam_idx, xnp=np):
  """Gathers relevant camera parameters for each ray."""
  pixtocams, camtoworlds, distortion_params = cameras[:3]

  # ndim <= 2 means a single shared camera matrix; no gathering needed.
  if pixtocams.ndim > 2:
    pixtocams_idx = pixtocams[cam_idx]
  else:
    pixtocams_idx = pixtocams

  if camtoworlds.ndim > 2:
    camtoworlds_idx = camtoworlds[cam_idx]
  else:
    camtoworlds_idx = camtoworlds

  if distortion_params is not None:
    # Per-camera distortion values are gathered; scalars are shared as-is.
    distortion_params_idx = {}
    for k, v in distortion_params.items():  # pytype: disable=attribute-error  # jax-ndarray
      if not xnp.isscalar(v):
        distortion_params_idx[k] = v[cam_idx]
      else:
        distortion_params_idx[k] = v
  else:
    distortion_params_idx = None

  return (
      pixtocams_idx,
      camtoworlds_idx,
      distortion_params_idx,
  )


def cast_ray_batch(
    cameras,
    rays,
    camtype = ProjectionType.PERSPECTIVE,
    scene_bbox = None,
    xnp = np,
):
  """Maps from input cameras and uncast Rays batch to output cast Rays batch.

  `cameras` is a Tuple of five sets of camera parameters.
    pixtocams: 1 or N stacked [3, 3] inverse intrinsic matrices.
    camtoworlds: 1 or N stacked [3, 4] extrinsic pose matrices.
    distortion_params: optional, dict[str, float] containing pinhole model
      distortion parameters.
    pixtocam_ndc: optional, [3, 3] inverse intrinsic matrix for mapping to NDC.
    z_range: optional range of Z values

  Args:
    cameras: described above.
    rays: ray data including integer pixel coordinates and camera indices.
      These fields can be an arbitrary batch shape.
    camtype: camera_utils.ProjectionType, fisheye or perspective camera.
    scene_bbox: min and max corner of scene bounding box, if applicable.
    xnp: either numpy or jax.numpy.

  Returns:
    rays: Rays dataclass with computed 3D world space ray data.
  """
  # rays.cam_idx has shape [..., 1], remove this hanging dimension.
  cam_idx = rays.cam_idx[Ellipsis, 0]

  cameras_idx = gather_cameras(cameras, cam_idx, xnp=xnp)
  pixtocams, camtoworlds, distortion_params = cameras_idx
  pixtocam_ndc, z_range = cameras[3:5]

  # Compute rays from pixel coordinates.
  origins, directions, viewdirs, radii, imageplane = pixels_to_rays(
      rays.pixels[Ellipsis, 0],
      rays.pixels[Ellipsis, 1],
      pixtocams,
      camtoworlds,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )

  if z_range is not None:
    # Crop each ray to the z-plane slab; misses (t_max < t_min) are left
    # untouched.
    t_min, t_max = rays_planes_intersection(
        z_range[0], z_range[1], origins, directions, xnp
    )
    t_min = xnp.broadcast_to(t_min[Ellipsis, None], origins.shape)
    t_max = xnp.broadcast_to(t_max[Ellipsis, None], origins.shape)
    hit_mask = t_max < t_min
    origins = xnp.where(hit_mask, origins, origins + directions * t_min)
    directions = xnp.where(hit_mask, directions, directions * (t_max - t_min))

  # Preserve all metadata and add the cast rays.
  rays = rays.replace(
      origins=origins,
      directions=directions,
      viewdirs=viewdirs,
      radii=radii,
      imageplane=imageplane,
  )
  if scene_bbox is not None:
    rays = modify_rays_with_bbox(rays, scene_bbox, xnp=xnp)
  return rays


def cast_general_rays(
    camtoworld,
    pixtocam,
    height,
    width,
    near,
    far,
    distortion_params = None,
    pixtocam_ndc = None,
    camtype = ProjectionType.PERSPECTIVE,
    xnp = np,
):
  """Wrapper for generating a general ray batch."""
  pix_x_int, pix_y_int = pixel_coordinates(width, height, xnp=xnp)

  ray_args = pixels_to_rays(
      pix_x_int,
      pix_y_int,
      pixtocam,
      camtoworld,
      distortion_params=distortion_params,
      pixtocam_ndc=pixtocam_ndc,
      camtype=camtype,
      xnp=xnp,
  )

  broadcast_scalar = (
      lambda x: xnp.broadcast_to(x, pix_x_int.shape)[Ellipsis, None]
  )
  ray_kwargs = {
      'pixels': xnp.stack([pix_x_int, pix_y_int], axis=-1),
      'near': broadcast_scalar(near),
      'far': broadcast_scalar(far),
      'cam_idx': broadcast_scalar(0),
  }

  return utils.Rays(*ray_args, **ray_kwargs)


def cast_pinhole_rays(
    camtoworld,
    height,
    width,
    focal,
    near,
    far,
    xnp = np,
):
  """Generates a pinhole camera ray batch (w/o distortion)."""
  return cast_general_rays(
      camtoworld,
      get_pixtocam(focal, width, height, xnp=xnp),
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PERSPECTIVE,
      xnp=xnp,
  )


def cast_spherical_rays(
    camtoworld,
    height,
    width,
    near,
    far,
    xnp,
):
  """Generates a spherical camera ray batch."""
  # The "intrinsics" here map pixel indices to (theta, phi) angles covering
  # the full 360x180 degree panorama.
  return cast_general_rays(
      camtoworld,
      xnp.diag(xnp.array([2.0 * np.pi / width, np.pi / height, 1.0])),
      height,
      width,
      near,
      far,
      camtype=ProjectionType.PANORAMIC,
      xnp=xnp,
  )


def jax_camera_from_tuple(
    camera_tuple,
    image_size,
    projection_type,
):
  """Converts a camera tuple into a JAX camera.

  Args:
    camera_tuple: A tuple containing `inv_intrinsics`, the inverse intrinsics
      matrix; `extrinsics`, the camera to world matrix; and
      `distortion_params`, the dictionary of distortion parameters.
    image_size: An array containing the (width, height) image size.
    projection_type: The projection type of the camera.

  Returns:
    A JAX camera class instance encoding the same camera information.
  """
  if projection_type.value not in {
      ProjectionType.PERSPECTIVE.value,
      ProjectionType.FISHEYE.value,
  }:
    raise ValueError(f'Projection {projection_type} is not supported.')

  inv_intrinsics, extrinsic, distortion_params = camera_tuple[:3]
  intrinsics = jnp.linalg.inv(inv_intrinsics)
  focal_length = intrinsics[0, 0]
  principal_point = intrinsics[:2, 2]
  pixel_aspect_ratio = intrinsics[1, 1] / intrinsics[0, 0]
  radial_distortion = None
  tangential_distortion = None
  if distortion_params is not None:
    # k1..k3 are required for radial distortion; k4 is included if present.
    if (
        'k1' in distortion_params
        and 'k2' in distortion_params
        and 'k3' in distortion_params
    ):
      radial_keys = ['k1', 'k2', 'k3', 'k4']
      radial_distortion = jnp.array(
          [distortion_params[k] for k in radial_keys if k in distortion_params]
      )
    if 'p1' in distortion_params and 'p2' in distortion_params:
      tangential_distortion = jnp.array([
          distortion_params['p1'],
          distortion_params['p2'],
      ])

  # Homogenize the [3, 4] pose to [4, 4] so it can be inverted below.
  extrinsic = jnp.concatenate(
      [extrinsic[:3, :4], jnp.array([[0, 0, 0, 1]])], axis=0
  )
  # Convert to OpenCV coordinates.
  extrinsic = math.matmul(extrinsic, jnp.diag(jnp.array([1, -1, -1, 1])))
  world_to_cam = jnp.linalg.inv(extrinsic)
  camera = jaxcam.Camera.create(
      focal_length=focal_length,
      pixel_aspect_ratio=pixel_aspect_ratio,
      radial_distortion=radial_distortion,
      tangential_distortion=tangential_distortion,
      principal_point=principal_point,
      image_size=image_size,
      is_fisheye=(projection_type.value == ProjectionType.FISHEYE.value),
  )
  camera = jaxcam.update_world_to_camera_matrix(camera, world_to_cam)
  return camera


def tuple_from_jax_camera(
    jax_camera,
):
  """Converts a JAX camera into a camera tuple."""
  focal_x = jax_camera.focal_length
  focal_y = jax_camera.focal_length * jax_camera.pixel_aspect_ratio
  intrinsic = jnp.block([
      [focal_x, jax_camera.skew, jax_camera.principal_point[0]],
      [0, focal_y, jax_camera.principal_point[1]],
      [0, 0, 1],
  ])
  pix_to_cam = jnp.linalg.inv(intrinsic)
  world_to_cam = jaxcam.world_to_camera_matrix(jax_camera)
  cam_to_world = jnp.linalg.inv(world_to_cam)
  # Convert back to OpenGL coordinates (inverse of jax_camera_from_tuple).
  cam_to_world = math.matmul(cam_to_world, jnp.diag(jnp.array([1, -1, -1, 1])))
  cam_to_world = cam_to_world[:3, :]
  distortion_params = None
  if jax_camera.has_distortion:
    distortion_params = {}
    if jax_camera.has_radial_distortion:
      distortion_params.update({
          'k1': jax_camera.radial_distortion[0],
          'k2': jax_camera.radial_distortion[1],
          'k3': jax_camera.radial_distortion[2],
          'k4': jax_camera.radial_distortion[3],
      })
    if jax_camera.has_tangential_distortion:
      distortion_params.update({
          'p1': jax_camera.tangential_distortion[0],
          'p2': jax_camera.tangential_distortion[1],
      })

  return pix_to_cam, cam_to_world, distortion_params


def rotation_distance(
    rotation_mat1, rotation_mat2
):
  """Computes the angle between two rotation matrices in degrees.

  Args:
    rotation_mat1: (3, 3) The first batch of rotation matrix.
    rotation_mat2: (3, 3) The second batch of rotation matrix.

  Returns:
    The angle in degrees between 0 and 180.
""" axis_angle1 = rigid_body.log_so3(rotation_mat1) axis_angle2 = rigid_body.log_so3(rotation_mat2) orientation_error_deg = jnp.degrees( jnp.linalg.norm(axis_angle1 - axis_angle2, axis=-1) ) return jnp.where( # pytype: disable=bad-return-type # jnp-type orientation_error_deg < 180, orientation_error_deg, 360 - orientation_error_deg, ) def compute_camera_metrics( cameras_gt, cameras_pred ): """Computes the metrics between two cameras.""" orientation_diffs = jax.vmap(rotation_distance)( cameras_pred.orientation, cameras_gt.orientation ) translation_diffs = jnp.abs(cameras_pred.translation - cameras_gt.translation) diffs = { 'focal_length': jnp.abs( cameras_pred.focal_length - cameras_gt.focal_length ), 'position': jnp.linalg.norm( cameras_pred.position - cameras_gt.position, axis=-1 ), 'translation_x': translation_diffs[Ellipsis, 0], 'translation_y': translation_diffs[Ellipsis, 1], 'translation_z': translation_diffs[Ellipsis, 2], 'orientation': jnp.abs(orientation_diffs), 'principal_points': jnp.linalg.norm( cameras_pred.principal_point - cameras_gt.principal_point, axis=-1, ), } if cameras_pred.radial_distortion is not None: radial_distortion_gt = jnp.zeros(4) if cameras_gt.has_radial_distortion: radial_distortion_gt = cameras_gt.radial_distortion for i in range(cameras_pred.radial_distortion.shape[-1]): diffs[f'radial_distortion_{i}'] = jnp.abs( cameras_pred.radial_distortion[Ellipsis, i] - radial_distortion_gt[Ellipsis, i] ) if cameras_pred.tangential_distortion is not None: tangential_distortion_gt = jnp.zeros(2) if cameras_gt.has_tangential_distortion: tangential_distortion_gt = cameras_gt.radial_distortion for i in range(cameras_pred.tangential_distortion.shape[-1]): diffs[f'tangential_distortion_{i}'] = jnp.abs( cameras_pred.tangential_distortion[Ellipsis, i] - tangential_distortion_gt[Ellipsis, i] ) return diffs def perturb_cameras( rng, cameras, sigma_look_at, sigma_position, sigma_focal_length = 0.0, sigma_dolly_z = 0.0, single_dolly = True, 
    dolly_use_average = False,
):
  """Randomly perturb camera positions and orientations.

  For position the 3D coordinate is simply shifted according to an offset
  vector. For the orientation an offset angle is calculated based on spherical
  coordinates. The underlying offsets are randomly chosen using normal
  distributions based on the input sigmas.

  Args:
    rng: A PRNGKey.
    cameras: Cameras to perturb.
    sigma_look_at: Strength of look-at position offset. Higher means stronger.
    sigma_position: Strength of position offset. Higher means stronger.
    sigma_focal_length: Strength of focal length zoom z-axis scale. Higher
      means stronger. This is essentially a percentage (0.2 means 20%).
    sigma_dolly_z: Strength of Dolly zoom z-axis scale. Higher means stronger.
      This is essentially a percentage (0.2 means 20%).
    single_dolly: If True, only have a single perturbation for dolly zoom.
    dolly_use_average: If True, set the dolly z to the average of the input
      instead of perturbing.

  Returns:
    Perturbed cameras.
  """
  # Dolly zoom: move the camera along its z axis while rescaling the focal
  # length by the same factor, which keeps the projected image scale fixed.
  if sigma_dolly_z > 0.0 or dolly_use_average:
    # Turn out "percentage" into a log scale. This is equivalent to having
    # minval = log(1+s) and maxval = log(1/(1+s)) but sampling from a normal
    # distribution.
    log_sigma_dolly_z = jnp.log1p(sigma_dolly_z)
    rng, dolly_key = random.split(rng)
    translation = cameras.translation
    x, y, z = jnp.split(translation, 3, -1)
    if dolly_use_average:
      # Deterministic: every camera gets the batch-mean z.
      new_z = jnp.broadcast_to(z.mean(axis=0, keepdims=True), z.shape)
    elif single_dolly:
      # One shared log-normal scale for the whole batch.
      new_z = z * jnp.exp(random.normal(dolly_key, (1,)) * log_sigma_dolly_z)
    else:
      # Independent log-normal scale per camera.
      new_z = z * jnp.exp(random.normal(dolly_key, z.shape) * log_sigma_dolly_z)
    # Scale focal length proportionally to the z change (the dolly effect).
    new_focal_length = cameras.focal_length * (new_z / z).squeeze(-1)
    new_translation = jnp.concatenate([x, y, new_z], axis=-1)
    # Recover world position from the updated translation: p = -R^T t.
    new_position = jax.vmap(spin_math.matmul)(
        -cameras.orientation.swapaxes(-1, -2), new_translation
    )
    cameras = cameras.replace(
        position=new_position, focal_length=new_focal_length
    )

  # Perturb focal length with an independent log-normal scale per camera.
  rng, key = random.split(rng)
  new_focal_length = cameras.focal_length * jnp.exp(
      random.normal(key, cameras.shape) * jnp.log1p(sigma_focal_length)
  )
  cameras = cameras.replace(focal_length=new_focal_length)

  camera_positions = cameras.position
  # Row 1 of the orientation matrix is the camera's -y axis, so its negation
  # is the up vector.
  up_vectors = -cameras.orientation[Ellipsis, 1, :]

  # Perturb camera positions by a random unit direction scaled by sigma.
  rng, key = random.split(rng)
  perturb_dir = spin_math.normalize(random.normal(key, camera_positions.shape))
  # np.array forces a host transfer; presumably intentional because the
  # per-camera look_at loop below runs outside jit — TODO confirm.
  camera_positions_perturbed = np.array(
      sigma_position * perturb_dir + camera_positions
  )
  # Perturb look-at point: the current look-at is taken as the point on each
  # optical axis closest to the origin.
  look_at_positions = jax.vmap(geometry.line_closest_point)(
      cameras.position, cameras.optical_axis, jnp.zeros_like(cameras.position)
  )
  rng, key = random.split(rng)
  # NOTE(review): uses math.normalize here but spin_math.normalize above —
  # presumably equivalent helpers; confirm and unify.
  perturb_dir = math.normalize(random.normal(key, camera_positions.shape))
  look_at_positions_perturbed = np.array(
      sigma_look_at * perturb_dir + look_at_positions
  )

  # Apply the look-at function to rebuild each camera's orientation from its
  # perturbed eye/center/up triple.
  new_cameras = []
  for camera, camera_position, look_at_position, up_vector in zip(
      cameras,
      camera_positions_perturbed,
      look_at_positions_perturbed,
      up_vectors,
  ):
    new_cameras.append(
        jaxcam.look_at(
            camera=camera,
            eye=camera_position,
            center=look_at_position,
            world_up=up_vector,
        )
    )
  cameras = jaxcam.concatenate(new_cameras)
  return cameras
evocodebench_data_256
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Helper functions for shooting and rendering rays."""

import jax
import jax.numpy as jnp
import jax.scipy as jsp

from internal import math
from internal import stepfun


def lift_gaussian(d, t_mean, t_var, r_var, diag):
  """Lift a Gaussian defined along a ray to 3D coordinates.

  Given a 1D Gaussian along ray direction `d` (mean t_mean, variance t_var)
  plus a perpendicular variance r_var, produce the 3D mean and covariance.
  When `diag` is True only the per-axis variances are returned.
  """
  mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None]
  # Clamp |d|^2 away from zero to avoid division blow-up for tiny directions.
  d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True))
  if diag:
    # Diagonal approximation: split variance into the component along d and
    # the component in the plane perpendicular to d.
    d_outer_diag = d**2
    null_outer_diag = 1 - d_outer_diag / d_mag_sq
    t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :]
    xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :]
    cov_diag = t_cov_diag + xy_cov_diag
    return mean, cov_diag
  else:
    # Full covariance: t_var along the d d^T outer product, r_var along the
    # projector onto d's null space.
    d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :]
    eye = jnp.eye(d.shape[-1])
    null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :]
    t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :]
    xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :]
    cov = t_cov + xy_cov
    return mean, cov


def gaussianize_frustum(t0, t1):
  """Convert intervals along a conical frustum into means and variances."""
  # A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415.
  # `s` and `d` reparameterize the interval by its midpoint-sum and width.
  s = t0 + t1
  d = t1 - t0
  eps = jnp.finfo(jnp.float32).eps ** 2
  ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2)
  t_mean = s * (1 / 2 + ratio)
  t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2)
  r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio)
  return t_mean, t_var, r_var


def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag):
  """Approximate a 3D conical frustum as a Gaussian distribution (mean+cov).

  Assumes the ray is originating from the origin, and base_radius is the
  radius at dist=1. Doesn't assume `d` is normalized.

  Args:
    d: jnp.float32 3-vector, the axis of the cone
    t0: float, the starting distance of the frustum.
    t1: float, the ending distance of the frustum.
    base_radius: float, the scale of the radius as a function of distance.
    diag: boolean, whether or the Gaussian will be diagonal or full-covariance.

  Returns:
    a Gaussian (mean and covariance).
  """
  t_mean, t_var, r_var = gaussianize_frustum(t0, t1)
  # Scale perpendicular variance by the cone's radius-per-unit-distance.
  r_var *= base_radius**2
  mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag)
  return mean, cov


def cylinder_to_gaussian(d, t0, t1, radius, diag):
  """Approximate a cylinder as a Gaussian distribution (mean+cov).

  Assumes the ray is originating from the origin, and radius is the
  radius. Does not renormalize `d`.

  Args:
    d: jnp.float32 3-vector, the axis of the cylinder
    t0: float, the starting distance of the cylinder.
    t1: float, the ending distance of the cylinder.
    radius: float, the radius of the cylinder
    diag: boolean, whether or the Gaussian will be diagonal or full-covariance.

  Returns:
    a Gaussian (mean and covariance).
  """
  # Closed-form moments of a uniform distribution on the cylinder segment.
  t_mean = (t0 + t1) / 2
  r_var = radius**2 / 4
  t_var = (t1 - t0) ** 2 / 12
  return lift_gaussian(d, t_mean, t_var, r_var, diag)


def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True):
  """Cast rays (cone- or cylinder-shaped) and featurize sections of it.

  Args:
    tdist: float array, the "fencepost" distances along the ray.
    origins: float array, the ray origin coordinates.
    directions: float array, the ray direction vectors.
    radii: float array, the radii (base radii for cones) of the rays.
    ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'.
    diag: boolean, whether or not the covariance matrices should be diagonal.

  Returns:
    a tuple of arrays of means and covariances.
  """
  # Each adjacent pair of fenceposts defines one interval to Gaussianize.
  t0 = tdist[Ellipsis, :-1]
  t1 = tdist[Ellipsis, 1:]
  if ray_shape == 'cone':
    gaussian_fn = conical_frustum_to_gaussian
  elif ray_shape == 'cylinder':
    gaussian_fn = cylinder_to_gaussian
  else:
    raise ValueError("ray_shape must be 'cone' or 'cylinder'")
  means, covs = gaussian_fn(directions, t0, t1, radii, diag)
  # The Gaussians are computed with the ray at the origin; shift to world.
  means = means + origins[Ellipsis, None, :]
  return means, covs


def compute_alpha_weights_helper(density_delta):
  """Helper function for compute_alpha_weights."""
  # Transmittance up to each interval: exp of minus the cumulative density
  # of all *preceding* intervals (hence the leading zero column).
  log_trans = -jnp.concatenate(
      [
          jnp.zeros_like(density_delta[Ellipsis, :1]),
          jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1),
      ],
      axis=-1,
  )
  # Standard volume-rendering weights: alpha_i * T_i.
  alpha = 1 - jnp.exp(-density_delta)
  trans = jnp.exp(log_trans)
  weights = alpha * trans
  return weights


def compute_alpha_weights(
    density,
    tdist,
    dirs,
    **kwargs,
):
  """Helper function for computing alpha compositing weights."""
  t_delta = jnp.diff(tdist)
  # Scale parametric interval lengths by |dirs| to get metric distances.
  delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1)
  density_delta = density * delta
  return compute_alpha_weights_helper(density_delta, **kwargs)


def volumetric_rendering(
    rgbs,
    weights,
    tdist,
    bg_rgbs,
    compute_extras,
    extras=None,
    percentiles = (5, 50, 95),
):
  """Volumetric Rendering Function.

  Args:
    rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3]
    weights: jnp.ndarray(float32), weights, [batch_size, num_samples].
    tdist: jnp.ndarray(float32), [batch_size, num_samples].
    bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background.
    compute_extras: bool, if True, compute extra quantities besides color.
    extras: dict, a set of values along rays to render by alpha compositing.
    percentiles: depth will be returned for these percentiles.

  Returns:
    rendering: a dict containing an rgb image of size [batch_size, 3], and
    other visualizations if compute_extras=True.
  """
  eps = jnp.finfo(jnp.float32).eps
  rendering = {}

  acc = weights.sum(axis=-1)
  # Whatever weight is not absorbed by the samples goes to the background.
  bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None])  # The weight of the background.
  if rgbs is not None:
    rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs
  else:
    rgb = None
  rendering['rgb'] = rgb

  if compute_extras:
    rendering['acc'] = acc

    if extras is not None:
      for k, v in extras.items():
        if v is not None:
          rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2)

    expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc)
    t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:])
    # For numerical stability this expectation is computing using log-distance.
    # NOTE(review): the second positional argument of jnp.nan_to_num is
    # `copy`, not `nan` — presumably `nan=jnp.inf` was intended so that
    # zero-weight rays map to infinite depth; confirm before changing.
    rendering['distance_mean'] = jnp.clip(
        jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf),
        tdist[Ellipsis, 0],
        tdist[Ellipsis, -1],
    )

    # Normalize the weights to sum to 1.
    weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None])
    distance_percentiles = stepfun.weighted_percentile(
        tdist, weights_norm, percentiles
    )

    for i, p in enumerate(percentiles):
      s = 'median' if p == 50 else 'percentile_' + str(p)
      rendering['distance_' + s] = distance_percentiles[Ellipsis, i]

  return rendering
evocodebench_data_257
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tools for manipulating step functions (piecewise-constant 1D functions).

We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1.
`p` indicates non-negative bin values that *integrate* to <= 1.
"""

from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np


def query(tq, t, y, left=None, right=None):
  """Query step function (t, y) at locations tq. Edges repeat by default."""
  utils.assert_valid_stepfun(t, y)
  # Query the step function to recover the interval value.
  (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
  # Apply boundary conditions.
  # Out-of-range queries fall back to the first/last bin value unless the
  # caller supplies explicit `left`/`right` fill values.
  left = y[Ellipsis, :1] if left is None else left
  right = y[Ellipsis, -1:] if right is None else right
  yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
  return yq


def weight_to_pdf(t, w):
  """Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
  utils.assert_valid_stepfun(t, w)
  td = jnp.diff(t)
  # Zero-width bins would divide by ~0; define their density to be 0 instead.
  return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))


def pdf_to_weight(t, p):
  """Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
  utils.assert_valid_stepfun(t, p)
  # Weight of each bin is density times bin width.
  return p * jnp.diff(t)


def integrate_weights(w):
  """Compute the cumulative sum of w, assuming all weight vectors sum to 1.

  The output's size on the last dimension is one greater than that of the
  input, because we're computing the integral corresponding to the endpoints
  of a step function, not the integral of the interior/bin values.

  Args:
    w: Tensor, which will be integrated along the last axis. This is assumed
      to sum to 1 along the last axis, and this function will (silently) break
      if that is not the case.

  Returns:
    cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
  """
  cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
  shape = cw.shape[:-1] + (1,)
  # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
  cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
  return cw0


def invert_cdf(u, t, w_logits):
  """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
  utils.assert_valid_stepfun(t, w_logits)
  # Compute the PDF and CDF for each weight vector.
  w = jax.nn.softmax(w_logits, axis=-1)
  cw = integrate_weights(w)
  # Interpolate into the inverse CDF.
  t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
  return t_new


def sample(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    deterministic_center=False,
    eps=jnp.finfo(jnp.float32).eps,
):
  """Piecewise-Constant PDF sampling from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of samples.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    deterministic_center: bool, if False, when `rng` is None return samples
      that linspace the entire PDF. If True, skip the front and back of the
      linspace so that the centers of each PDF interval are returned.
    eps: float, something like numerical epsilon.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  utils.assert_valid_stepfun(t, w_logits)

  # Draw uniform samples.
  if rng is None:
    # Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
    if deterministic_center:
      pad = 1 / (2 * num_samples)
      u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
    else:
      u = jnp.linspace(0, 1.0 - eps, num_samples)
    u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
  else:
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    # Stratified sampling: evenly spaced base points plus a bounded random
    # jitter so samples cannot cross into a neighboring stratum.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps
    d = 1 if single_jitter else num_samples
    u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
        rng, t.shape[:-1] + (d,), maxval=max_jitter
    )

  return invert_cdf(u, t, w_logits)


def sample_intervals(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    domain=(-jnp.inf, jnp.inf),
):
  """Sample *intervals* (rather than points) from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of intervals to sample.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    domain: (minval, maxval), the range of valid values for `t`.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  utils.assert_valid_stepfun(t, w_logits)
  if num_samples <= 1:
    raise ValueError(f'num_samples must be > 1, is {num_samples}.')

  # Sample a set of points from the step function.
  centers = sample(
      rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
  )

  # The intervals we return will span the midpoints of each adjacent sample.
  mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2

  # Each first/last fencepost is the reflection of the first/last midpoint
  # around the first/last sampled center.
  first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
  last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
  samples = jnp.concatenate([first, mid, last], axis=-1)

  # We clamp to the limits of the input domain, provided by the caller.
  samples = jnp.clip(samples, *domain)
  return samples


def lossfun_distortion(t, w):
  """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
  utils.assert_valid_stepfun(t, w)
  # The loss incurred between all pairs of intervals.
  # Approximated by the pairwise distances between interval midpoints `ut`.
  ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
  dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
  loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)

  # The loss incurred within each individual interval with itself.
  loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3

  return loss_inter + loss_intra


def weighted_percentile(t, w, ps):
  """Compute the weighted percentiles of a step function. w's must sum to 1."""
  utils.assert_valid_stepfun(t, w)
  cw = integrate_weights(w)
  # We want to interpolate into the integrated weights according to `ps`.
  wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
      jnp.array(ps) / 100, cw, t
  )
  return wprctile


def resample(t, tp, vp, use_avg=False):
  """Resample a step function defined by (tp, vp) into intervals t.

  Notation roughly matches jnp.interp. Resamples by summation by default.

  Args:
    t: tensor with shape (..., n+1), the endpoints to resample into.
    tp: tensor with shape (..., m+1), the endpoints of the step function being
      resampled.
    vp: tensor with shape (..., m), the values of the step function being
      resampled.
    use_avg: bool, if False, return the sum of the step function for each
      interval in `t`. If True, return the average, weighted by the width of
      each interval in `t`.

  Returns:
    v: tensor with shape (..., n), the values of the resampled step function.
  """
  utils.assert_valid_stepfun(tp, vp)
  if use_avg:
    # Average = (width-weighted sum of values) / (sum of widths), both
    # computed by recursing into the summation branch below.
    wp = jnp.diff(tp)
    v_numer = resample(t, tp, vp * wp, use_avg=False)
    v_denom = resample(t, tp, wp, use_avg=False)
    v = math.safe_div(v_numer, v_denom)
    return v

  # Sum within each output interval via differences of the interpolated
  # cumulative sum (prefixed with a zero so the first bin is included).
  acc = jnp.cumsum(vp, axis=-1)
  acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
  acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
      t, tp, acc0
  )
  v = jnp.diff(acc0_resampled, axis=-1)
  return v


def blur_and_resample_weights(tq, t, w, blur_halfwidth):
  """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
  utils.assert_valid_stepfun(t, w)
  # Convert the histogram to a PDF.
  p = weight_to_pdf(t, w)
  # Blur the PDF step function into a piecewise linear spline PDF.
  t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
  # Integrate the spline PDF, then query it to get integrated weights.
  quad = linspline.compute_integral(t_linspline, p_linspline)
  acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
  # Undo the integration to get weights.
  wq = jnp.diff(acc_wq, axis=-1)
  # Fix negative values to 0, as they should never happen but may due to
  # numerical issues.
  wq = jnp.maximum(0, wq)
  return wq
evocodebench_data_258
# NOTE(review): everything below is a byte-for-byte duplicate of the stepfun
# utilities that appear earlier in this file — this looks like a file
# concatenation artifact. Consider deduplicating; code is left unmodified.
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tools for manipulating step functions (piecewise-constant 1D functions).

We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1.
`p` indicates non-negative bin values that *integrate* to <= 1.
"""

from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np


def query(tq, t, y, left=None, right=None):
  """Query step function (t, y) at locations tq. Edges repeat by default."""
  utils.assert_valid_stepfun(t, y)
  # Query the step function to recover the interval value.
  (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
  # Apply boundary conditions.
  left = y[Ellipsis, :1] if left is None else left
  right = y[Ellipsis, -1:] if right is None else right
  yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
  return yq


def weight_to_pdf(t, w):
  """Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
  utils.assert_valid_stepfun(t, w)
  td = jnp.diff(t)
  return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))


def pdf_to_weight(t, p):
  """Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
  utils.assert_valid_stepfun(t, p)
  return p * jnp.diff(t)


def integrate_weights(w):
  """Compute the cumulative sum of w, assuming all weight vectors sum to 1.

  The output's size on the last dimension is one greater than that of the
  input, because we're computing the integral corresponding to the endpoints
  of a step function, not the integral of the interior/bin values.

  Args:
    w: Tensor, which will be integrated along the last axis. This is assumed
      to sum to 1 along the last axis, and this function will (silently) break
      if that is not the case.

  Returns:
    cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
  """
  cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
  shape = cw.shape[:-1] + (1,)
  # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
  cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
  return cw0


def invert_cdf(u, t, w_logits):
  """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
  utils.assert_valid_stepfun(t, w_logits)
  # Compute the PDF and CDF for each weight vector.
  w = jax.nn.softmax(w_logits, axis=-1)
  cw = integrate_weights(w)
  # Interpolate into the inverse CDF.
  t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
  return t_new


def sample(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    deterministic_center=False,
    eps=jnp.finfo(jnp.float32).eps,
):
  """Piecewise-Constant PDF sampling from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of samples.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    deterministic_center: bool, if False, when `rng` is None return samples
      that linspace the entire PDF. If True, skip the front and back of the
      linspace so that the centers of each PDF interval are returned.
    eps: float, something like numerical epsilon.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  utils.assert_valid_stepfun(t, w_logits)

  # Draw uniform samples.
  if rng is None:
    # Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
    if deterministic_center:
      pad = 1 / (2 * num_samples)
      u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
    else:
      u = jnp.linspace(0, 1.0 - eps, num_samples)
    u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
  else:
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps
    d = 1 if single_jitter else num_samples
    u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
        rng, t.shape[:-1] + (d,), maxval=max_jitter
    )

  return invert_cdf(u, t, w_logits)


def sample_intervals(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    domain=(-jnp.inf, jnp.inf),
):
  """Sample *intervals* (rather than points) from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of intervals to sample.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    domain: (minval, maxval), the range of valid values for `t`.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  utils.assert_valid_stepfun(t, w_logits)
  if num_samples <= 1:
    raise ValueError(f'num_samples must be > 1, is {num_samples}.')

  # Sample a set of points from the step function.
  centers = sample(
      rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
  )

  # The intervals we return will span the midpoints of each adjacent sample.
  mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2

  # Each first/last fencepost is the reflection of the first/last midpoint
  # around the first/last sampled center.
  first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
  last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
  samples = jnp.concatenate([first, mid, last], axis=-1)

  # We clamp to the limits of the input domain, provided by the caller.
  samples = jnp.clip(samples, *domain)
  return samples


def lossfun_distortion(t, w):
  """Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
  utils.assert_valid_stepfun(t, w)
  # The loss incurred between all pairs of intervals.
  ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
  dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
  loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)

  # The loss incurred within each individual interval with itself.
  loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3

  return loss_inter + loss_intra


def weighted_percentile(t, w, ps):
  """Compute the weighted percentiles of a step function. w's must sum to 1."""
  utils.assert_valid_stepfun(t, w)
  cw = integrate_weights(w)
  # We want to interpolate into the integrated weights according to `ps`.
  wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
      jnp.array(ps) / 100, cw, t
  )
  return wprctile


def resample(t, tp, vp, use_avg=False):
  """Resample a step function defined by (tp, vp) into intervals t.

  Notation roughly matches jnp.interp. Resamples by summation by default.

  Args:
    t: tensor with shape (..., n+1), the endpoints to resample into.
    tp: tensor with shape (..., m+1), the endpoints of the step function being
      resampled.
    vp: tensor with shape (..., m), the values of the step function being
      resampled.
    use_avg: bool, if False, return the sum of the step function for each
      interval in `t`. If True, return the average, weighted by the width of
      each interval in `t`.

  Returns:
    v: tensor with shape (..., n), the values of the resampled step function.
  """
  utils.assert_valid_stepfun(tp, vp)
  if use_avg:
    wp = jnp.diff(tp)
    v_numer = resample(t, tp, vp * wp, use_avg=False)
    v_denom = resample(t, tp, wp, use_avg=False)
    v = math.safe_div(v_numer, v_denom)
    return v

  acc = jnp.cumsum(vp, axis=-1)
  acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
  acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
      t, tp, acc0
  )
  v = jnp.diff(acc0_resampled, axis=-1)
  return v


def blur_and_resample_weights(tq, t, w, blur_halfwidth):
  """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
  utils.assert_valid_stepfun(t, w)
  # Convert the histogram to a PDF.
  p = weight_to_pdf(t, w)
  # Blur the PDF step function into a piecewise linear spline PDF.
  t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
  # Integrate the spline PDF, then query it to get integrated weights.
  quad = linspline.compute_integral(t_linspline, p_linspline)
  acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
  # Undo the integration to get weights.
  wq = jnp.diff(acc_wq, axis=-1)
  # Fix negative values to 0, as they should never happen but may due to
  # numerical issues.
  wq = jnp.maximum(0, wq)
  return wq
# NOTE(review): everything below is another (truncated) duplicate of the
# stepfun utilities that appear earlier in this file — a concatenation
# artifact. Consider deduplicating; code is left unmodified.
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tools for manipulating step functions (piecewise-constant 1D functions).

We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1.
`p` indicates non-negative bin values that *integrate* to <= 1.
"""

from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np


def query(tq, t, y, left=None, right=None):
  """Query step function (t, y) at locations tq. Edges repeat by default."""
  utils.assert_valid_stepfun(t, y)
  # Query the step function to recover the interval value.
  (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
  # Apply boundary conditions.
  left = y[Ellipsis, :1] if left is None else left
  right = y[Ellipsis, -1:] if right is None else right
  yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
  return yq


def weight_to_pdf(t, w):
  """Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
  utils.assert_valid_stepfun(t, w)
  td = jnp.diff(t)
  return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))


def pdf_to_weight(t, p):
  """Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
  utils.assert_valid_stepfun(t, p)
  return p * jnp.diff(t)


def integrate_weights(w):
  """Compute the cumulative sum of w, assuming all weight vectors sum to 1.

  The output's size on the last dimension is one greater than that of the
  input, because we're computing the integral corresponding to the endpoints
  of a step function, not the integral of the interior/bin values.

  Args:
    w: Tensor, which will be integrated along the last axis. This is assumed
      to sum to 1 along the last axis, and this function will (silently) break
      if that is not the case.

  Returns:
    cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
  """
  cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
  shape = cw.shape[:-1] + (1,)
  # Ensure that the CDF starts with exactly 0 and ends with exactly 1.
  cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
  return cw0


def invert_cdf(u, t, w_logits):
  """Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
  utils.assert_valid_stepfun(t, w_logits)
  # Compute the PDF and CDF for each weight vector.
  w = jax.nn.softmax(w_logits, axis=-1)
  cw = integrate_weights(w)
  # Interpolate into the inverse CDF.
  t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
  return t_new


def sample(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    deterministic_center=False,
    eps=jnp.finfo(jnp.float32).eps,
):
  """Piecewise-Constant PDF sampling from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of samples.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
    deterministic_center: bool, if False, when `rng` is None return samples
      that linspace the entire PDF. If True, skip the front and back of the
      linspace so that the centers of each PDF interval are returned.
    eps: float, something like numerical epsilon.

  Returns:
    t_samples: jnp.ndarray(float32), [batch_size, num_samples].
  """
  utils.assert_valid_stepfun(t, w_logits)

  # Draw uniform samples.
  if rng is None:
    # Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
    if deterministic_center:
      pad = 1 / (2 * num_samples)
      u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
    else:
      u = jnp.linspace(0, 1.0 - eps, num_samples)
    u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
  else:
    # `u` is in [0, 1) --- it can be zero, but it can never be 1.
    u_max = eps + (1 - eps) / num_samples
    max_jitter = (1 - u_max) / (num_samples - 1) - eps
    d = 1 if single_jitter else num_samples
    u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
        rng, t.shape[:-1] + (d,), maxval=max_jitter
    )

  return invert_cdf(u, t, w_logits)


def sample_intervals(
    rng,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    domain=(-jnp.inf, jnp.inf),
):
  """Sample *intervals* (rather than points) from a step function.

  Args:
    rng: random number generator (or None for `linspace` sampling).
    t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
    w_logits: [..., num_bins], logits corresponding to bin weights
    num_samples: int, the number of intervals to sample.
    single_jitter: bool, if True, jitter every sample along each ray by the
      same amount in the inverse CDF. Otherwise, jitter each sample
      independently.
domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. 
Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq
evocodebench_data_260
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. 
left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. 
domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. 
Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq
evocodebench_data_261
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pyformat: mode=yapf """Math utility functions.""" from typing import Optional, Union from internal import math import jax from jax import numpy as jnp import optax def matmul(a, b): """jnp.matmul defaults to bfloat16 on TPU, but this doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def safe_sqrt(x, *, eps = jnp.finfo(jnp.float32).eps, value_at_zero = 0.0): """A safe version of jnp.sqrt that avoid evaluating at zero. Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7. Args: x: The operand. eps: A small number to prevent NaNs. value_at_zero: The value to clamp x to near zero. The return value will be sqrt(value_at_zero) Returns: The sqrt(x), or sqrt(value_at_zero) near zero. """ safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero)) return jnp.sqrt(safe_x) def safe_acos(t, eps = jnp.finfo(jnp.float32).eps): """A safe version of arccos which avoids evaluating at -1 or 1.""" return jnp.arccos(jnp.clip(t, -1.0 + eps, 1.0 - eps)) def safe_log(x, *, eps = jnp.finfo(jnp.float32).eps, value_at_zero = jnp.finfo(jnp.float32).eps): """Computes a safe log that avoids evaluating at zero. Args: x: Input array. eps: A small number to prevent NaNs. value_at_zero: The value to clamp x to near zero. The return value will be sqrt(value_at_zero) Returns: log(x) or log(value_at_zero) near zero. 
""" safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero)) return jnp.log(safe_x) def normalize( x, axis = -1, # pylint: disable=redefined-builtin ord = None, eps = jnp.finfo(jnp.float32).eps, ): """Normalize a vector.""" return x / optax.safe_norm(x, axis=axis, ord=ord, min_norm=eps, keepdims=True) def inv_sqrtm( matrix, normalize_eigvals = False, ): """Takes the inverse matrix square root of a PSD matrix. Forked from `coord.sqrtm`. Args: matrix: (..., d, d) A positive semi-definite matrix. normalize_eigvals: If True, normalize the eigenvalues by the geometric mean. Returns: The inverse square root of the matrix, and (eigvec, eigval) if return_eigs is True. """ eigvec, eigval = jax.lax.linalg.eigh( matrix, symmetrize_input=False, sort_eigenvalues=False) if normalize_eigvals: # Equivalent to dividing by geometric mean, but numerically stabler. log_eigval = jnp.log(eigval) eigval = jnp.exp(log_eigval - jnp.mean(log_eigval, axis=-1, keepdims=True)) scaling = math.safe_div(1, math.safe_sqrt(eigval)) scaling = scaling[Ellipsis, None, :] sqrtm_mat = matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return sqrtm_mat, (eigvec, eigval) def to_homogeneous(v): """Converts a vector to a homogeneous representation. Args: v: (*, C) A non-homogeneous vector. Returns: (*, C+1) A homogeneous version of v. """ return jnp.concatenate([v, jnp.ones_like(v[Ellipsis, :1])], axis=-1) def from_homogeneous(v): """Converts a homogeneous vector to a non-homogeneous vector. Args: v: (*, C+1) A homogeneous vector. Returns: (*, C) The non-homogeneous version of v. """ return v[Ellipsis, :-1] / v[Ellipsis, -1:] def apply_homogeneous_transform(transform, vectors): """Apply a homogeneous transformation to a collection of vectors. Args: transform: (C+1,C+1) A homogeneous transformation matrix. vectors: (*,C) An array containing 3D points. Returns: (*,C) The points transformed by the array. 
""" vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1]))) transformed = from_homogeneous(matmul(transform, vectors_h.T).T) return transformed.reshape(vectors.shape) def generalized_bias_and_gain(x, slope, threshold): """Maps the input according to the generalized bias and gain function. References: https://arxiv.org/abs/2010.09714 Args: x: The inputs array with values in [0, 1] to map. slope: The slope parameter of the curve which controls the slope of the curve at the threshold. threshold: The value at which `x` reverses its shape, and the point at which the output is guaranteed to be equal to the input. Returns: The output of the curve at each input point `x`. """ eps = jnp.finfo(jnp.float32).tiny left_curve = (threshold * x) / (x + slope * (threshold - x) + eps) right_curve = ((1 - threshold) * (x - 1) / (1 - x - slope * (threshold - x) + eps) + 1) return jnp.where(x < threshold, left_curve, right_curve)
evocodebench_data_262
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. 
left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. 
domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. 
Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq
evocodebench_data_263
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. 
""" if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. 
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). 
Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. 
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. 
perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. 
if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. 
control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag
evocodebench_data_264
# coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. 
grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. 
""" x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
evocodebench_data_265
import logging import re from collections import Counter from collections import defaultdict from . import formatter from . import line_parser from . import patterns from nlm_ingestor.ingestor_utils import spell_utils from nlm_ingestor.ingestor_utils.utils import sent_tokenize logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) su = spell_utils.SpellUtil() def stem(line): line = line.replace("'s", "") line = line.replace("’s", "") return line def check_parentheses(text): count = 0 for i in text: if i == "(": count += 1 elif i == ")": count -= 1 return count == 0 def nlm_tokenize(line): # print(line) tokens = [] if not line: line = "" line = line.lower() trans_table = line.maketrans("-/", " ") line = line.translate(trans_table) line = line.translate(str.maketrans("", "", "�\\(*,.?•\\➢ƒ–\\)'\"—")) # line = patterns.num_unit.sub(r"100 \1", line) line = patterns.num_unit.sub(r"", line) line = stem(line) words = line.split() for word in words: if ( not word.isdigit() and not word.endswith("%") and not word.startswith("$") and not word.endswith("$") ): tokens.append(word) if len(tokens) == 0: tokens.append("unknown") return tokens # make sure that there is at least one word which is greater than two characters def find_floating_chars(line): words = line.split(" ") for word in words: if len(word) > 2: return False return True def is_table_row(line): line = line_parser.Line(line) return line.is_table_row def should_skip(line, xml=False): return len(line) <= 2 if not xml else len(line) == 0 def clean_lines(lines, xml=False): result = [] running_line = "" line_buffer = [] line_type = "para" header_block_idx = -1 block_idx = 0 line_set = set() for line_str in lines: # print(line_str) line_str = clean_line(line_str) if should_skip(line_str, xml=xml): continue line_without_numbers = re.sub(r"\d+", "", line_str) if line_without_numbers in line_set: continue else: line_set.add(line_without_numbers) curr_line = line_parser.Line(line_str) # this converst strings 
like 'e x e c u t i v e summary' to 'executive summary' if not xml and curr_line.has_spaced_characters: line_str = fix_spaced_characters(line_str) curr_line = line_parser.Line(line_str) if len(line_buffer) > 0: # find out if previous line was a discontinous line prev_line = line_buffer[-1] logger.debug("========") logger.debug(f"{prev_line.incomplete_line} >> {prev_line.text} \n") logger.debug(f"{curr_line.continuing_line} >> {curr_line.text} \n") # keep connecting lines as long as they seem incomplete is_incomplete = prev_line.incomplete_line or ( len(line_buffer) > 1 and not prev_line.ends_with_period ) logger.debug( f"incomplete: {is_incomplete}, is_list_or_row: {curr_line.is_list_or_row}, continuing_line: {curr_line.continuing_line}", ) if ( is_incomplete and not (curr_line.is_list_or_row or curr_line.line_type == "list_item") ) or curr_line.continuing_line: logger.debug("connecting..") running_line = formatter.connect(running_line, curr_line.text) line_buffer.append(curr_line) # if we are connecting lines, then this has to be a para unless it is a list_item, basically no headers if not line_type == "list_item": line_type = "para" else: # commit the line and start a new line # remove different types of bulletted list (for better formatting) but do not touch numbered line logger.debug("starting new line..") # if line_type == "list_item": # running_line = running_line[1:].lstrip() if line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) block_idx = block_idx + 1 running_line = curr_line.text line_buffer = [curr_line] line_type = curr_line.line_type logger.debug("========") else: running_line = curr_line.text line_type = curr_line.line_type line_buffer = [curr_line] if line_type == "list_item" and running_line[0] in "�\\*,.?•\\➢ƒ–\\'\"—": running_line = 
running_line[1:].lstrip() block = { "block_idx": block_idx, "block_text": running_line, "block_type": line_type, "text_group_start_idx": -1, "block_list": [], "header_block_idx": header_block_idx, "level": 0, } result.append(block) return result def line_list_check(prev_line, curr_line, list_char): # if prev_line is list_item and list_char matches curr_line if list_char == curr_line.text[0] and list_char not in ["”", "'", '"', "("]: return True # same char is alpha if prev_line.text[0] == curr_line.text[0] and prev_line.text[0].isalpha(): if len(prev_line.text) >= 2 and prev_line.text[1].isupper(): # spell check first word first_word = prev_line.text.split(" ")[0] first_word = first_word.replace("'", "") correct_word = su.segment(first_word) if first_word[1:] == correct_word: return True # same char is not alpha but not digit if prev_line.text[0] == curr_line.text[0] and not ( prev_line.text[0].isalpha() or prev_line.text[0].isdigit() or list_char not in ["”", "'", '"', "("] ): return True return False def should_join_table(prev_line, curr_line, ents_aligned): """ Check if next line should be joined as a tr. 
This makes no assumption if the current line is a table """ # print() # print("Checking to join tr", prev_line.visual_line.text_list, "\n", curr_line.visual_line.text_list) # check list of spaced words curr_line_ents = len(prev_line.visual_line.text_list) next_line_ents = len(curr_line.visual_line.text_list) ent_match = ( curr_line_ents == next_line_ents and curr_line_ents >= 2 ) # tr should have at least two elements # print("tab check", prev_line.visual_line.tab_count, curr_line.visual_line.tab_count) tab_match = ( prev_line.visual_line.tab_count == curr_line.visual_line.tab_count and curr_line.visual_line.tab_count > 0 ) # casing should also be the same same_case = ( prev_line.text[0].islower() == curr_line.text[0].islower() or prev_line.text[0].isupper() == curr_line.text[0].isupper() ) colon_check = ( prev_line.hit_colon and curr_line.hit_colon and prev_line and same_case and not prev_line.incomplete_line ) # if prev_line.hit_colon and curr_line.hit_colon: # print() # print("colon check") # print(prev_line.visual_line.text_list) # print(curr_line.visual_line.text_list) # col_check # print(tab_match, ent_match, colon_check) tab_check = prev_line.visual_line.tab_count or curr_line.visual_line.tab_count return ( (tab_match and ent_match) or colon_check or (ents_aligned and ent_match and tab_check) ) def check_page_spacing(prev_line, curr_line, spacing_dict): # print("^"*50) # print("checking page stats") # print(prev_line.visual_line.start_fs, prev_line.visual_line.end_fs, prev_line.text) # print(curr_line.visual_line.start_fs, curr_line.visual_line.end_fs, curr_line.text) # print() diff_top = round(curr_line.visual_line.start_y - prev_line.visual_line.end_y) # find best fs reference prev_line_fs = {prev_line.visual_line.start_fs, prev_line.visual_line.end_fs} curr_line_fs = {curr_line.visual_line.start_fs, curr_line.visual_line.end_fs} same_fs = prev_line_fs.intersection(curr_line_fs) fs = min(same_fs) if same_fs else curr_line.visual_line.start_fs min_check = ( 
spacing_dict[(fs, diff_top - 1)] if (fs, diff_top - 1) in spacing_dict else None ) max_check = ( spacing_dict[(fs, diff_top + 1)] if (fs, diff_top + 1) in spacing_dict else None ) normal_check = (fs, diff_top) in spacing_dict and spacing_dict[(fs, diff_top)] > 3 if min_check or normal_check or max_check: # get all fs in spacing dict # see if the diff top is a min # print("checking space dict") distance_list = [] for val in spacing_dict: if val[0] == fs and val[1] > 0 and spacing_dict[val] > 2: distance_list.append((val, val[1])) # print(distance_list) val = min(distance_list) if len(distance_list) else [] if len(val): join_fs, join_top = val[0] if len(val): join_fs, join_top = val[0] if val[0] == (fs, diff_top): # or close # print("SHOULDJOIN") return True elif ( join_fs == fs and ((diff_top - 1) == join_top) or ((diff_top + 1) == join_top) ): return True return False def compute_overlap( start_x0: float, end_x0: float, start_x1: float, end_x1: float, divide_by_min=True, ) -> float: """ Computes the % of intersection (overlap) of two lines w.r.t. the shortest line """ width_x0 = abs(end_x0 - start_x0) width_x1 = abs(end_x1 - start_x1) if start_x0 <= start_x1 <= end_x0: intersect = min(abs(end_x0 - start_x1), width_x1) elif start_x0 <= end_x1 <= end_x0: intersect = min(abs(end_x1 - start_x0), width_x1) elif start_x1 <= start_x0 <= end_x0 <= end_x1: intersect = abs(end_x0 - start_x0) else: intersect = 0.0 if divide_by_min: intersect /= min(width_x0, width_x1) + 1e-5 else: intersect /= max(width_x0, width_x1) + 1e-5 return intersect def compute_overlap_top_bottom( start_x0: float, end_x0: float, start_x1: float, end_x1: float, ) -> float: """ This is different from the above function. Finds percentage overlap of top to bottom. 
Score of 100% is possible doesn't reference the shortest line """ width_x1 = abs(end_x1 - start_x1) if width_x1 == 0: return 0.0 if start_x0 <= start_x1: # measure from left to right if end_x1 <= end_x0: # if start and end both less, full in subset return 1.0 return (end_x1 - start_x0) / width_x1 else: # measure from bottom start if end_x1 <= start_x0: return 0.0 return (end_x1 - start_x0) / width_x1 def compute_bottom_top_overlap(start_x0, end_x0, start_x1, end_x1): """ This is different from the above function. Finds percentage overlap of top to bottom. Score of 100% is possible doesn't reference the shortest line """ # print(start_x0, end_x0) # print(start_x1, end_x1) if start_x0 == start_x1 and end_x0 != start_x0: # aligned with bottom line # print() # print("bottom overlap", (end_x1 - start_x1) / (end_x0 - start_x0)) return (end_x1 - start_x1) / (end_x0 - start_x0) # other conditions # elif start_x0 < start_x1 and end_x0 > end_x1: # to the left of bottom line # return # else: #to the right of bottom line return 1.0 # header check for lines with similar font # header check for lines with similar font def visual_header_check(prev_line, curr_line, same_font): # check top overlap (small) if the font size is bigger # print() # print("visual_header check:") # print("prev", prev_line.text) # print("checking", curr_line.text) # top also has to be higher # print("prev_line.visual_line.start_y, prev_line.visual_line.end_y") # print(prev_line.visual_line.start_y, prev_line.visual_line.end_y) # print(prev_line.visual_line.start_y, curr_line.visual_line.start_y) if prev_line.visual_line.wrapped_page: return False if prev_line.visual_line.start_y < curr_line.visual_line.start_y: prev_line_width = prev_line.visual_line.max_x - prev_line.visual_line.min_x curr_line_width = curr_line.visual_line.max_x - curr_line.visual_line.min_x # print("prev_line.visual_line.min_x, prev_line.visual_line.max_x, prev_line.visual_line.end_x") # print(prev_line.visual_line.min_x, 
prev_line.visual_line.max_x, prev_line.visual_line.end_x) # print("curr_line.visual_line.min_x, curr_line.visual_line.max_x") # print(curr_line.visual_line.min_x, curr_line.visual_line.max_x) # print("prev_line_width / curr_line_width") # print(prev_line_width / curr_line_width) # print("prev_line_width, curr_line_width") # print(prev_line_width, curr_line_width) if curr_line_width == 0: return False # print(round(prev_line.visual_line.min_x), round(curr_line.visual_line.min_x)) if round(prev_line.visual_line.min_x) == round(curr_line.visual_line.min_x): if round(prev_line_width) == round(curr_line_width): # print() # print("NOT A HEADER1") return False offset = 0 # print(prev_line.visual_line.min_x, curr_line.visual_line.min_x) # print(prev_line.visual_line.min_x <= curr_line.visual_line.min_x) if prev_line.visual_line.min_x <= curr_line.visual_line.min_x: offset = curr_line.visual_line.min_x - prev_line.visual_line.min_x # offset # print("(prev_line_width - offset) / curr_line_width") # print((prev_line_width - offset) / curr_line_width) overlap_percentage = (prev_line_width - offset) / curr_line_width different_font_style = ( prev_line.visual_line.fw != curr_line.visual_line.fw or prev_line.visual_line[1] != curr_line.visual_line[1] or prev_line.visual_line.fs > curr_line.visual_line.fs ) if ( overlap_percentage < 0.3 or (different_font_style and overlap_percentage < 0.6) or (prev_line.line_type == "header" and different_font_style) # or (prev_line.is_header and different_font_style) ): # print("HEADER INDENT", prev_line.is_header) # print("overlap rule::", (prev_line_width - offset) / curr_line_width) # print(True) return True # print(False) # print() # print("NOT A HEADER") return False def visual_header_from_stats(prev_line, curr_line, page_stats): prev_fs = prev_line.visual_line.fs curr_fs = curr_line.visual_line.fs median_val = round(page_stats["median_fs"]) max_val = round(max(page_stats["fs_list"])) max_val_diff = ((max_val - prev_fs) / max_val) < 0.2 if 
max_val != 0 else True prev_fs_diff = round(prev_fs - median_val) curr_fs_diff = ( round(curr_fs - median_val) if round(curr_fs - median_val) else 0.8 ) # curr_fs is the median varied_set = len(set(page_stats["fs_list"])) >= 4 rounded_fs_count = Counter([round(x, 3) for x in page_stats["fs_list"]]) unique_text = rounded_fs_count[round(prev_fs, 3)] / len(page_stats["fs_list"]) prev_curr_ratio_from_median = prev_fs_diff / curr_fs_diff # print("prev_fs, curr_fs", prev_fs, curr_fs) # print("unique text") # print(rounded_fs_count[round(prev_fs, 3)], len(page_stats["fs_list"]) ) # print("visual_header check", len(set(page_stats["fs_list"]))) # print("varied_set", varied_set, "unique_text", unique_text) # print(rounded_fs_count) # print() # close from max or far enough from median bigger_text = max_val_diff or ( prev_curr_ratio_from_median > 2 ) # TODO text must also be relatively uncommon if varied_set and (unique_text <= 0.08): if bigger_text and (prev_fs_diff > 1) and (prev_fs_diff - curr_fs_diff) > 0.3: # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True # header join if bigger_text and curr_fs == prev_fs and (prev_fs_diff > 1): # print(max_val_diff) # print(prev_fs, prev_line.text) # print(curr_fs, curr_line.text) # print() return True return False # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): # def visual_clean_lines(lines, page_stats={}, page_info_dict={}): def check_tr_alignment(prev_line, curr_line): # print("-=" * 50) # print("check_tr_alignment!") # print(prev_line.text) # print(curr_line.text) # print() prev_ents = len(prev_line.visual_line.text_list) curr_ents = len(curr_line.visual_line.text_list) prev_positions = prev_line.visual_line.start_x_list curr_positions = curr_line.visual_line.start_x_list prev_line_start_ents = prev_line.visual_line.start_x_list_single_ent curr_line_start_ents = 
curr_line.visual_line.start_x_list_single_ent # print(prev_line_start_ents) # print(curr_line_start_ents) same_ents = prev_ents > 1 and abs(prev_ents - curr_ents) <= 1 if len(prev_line_start_ents) == len(curr_line_start_ents): prev_positions = prev_line_start_ents curr_positions = curr_line_start_ents if len(prev_line_start_ents) == len(curr_positions) and len( prev_line_start_ents, ) != len( prev_positions, ): # joined p_tags prev_positions = prev_line_start_ents if not same_ents: # print("check_tr_alignment False1") # print(prev_ents, curr_ents) return False # print("CHECKING POSITIONS") # print(prev_positions) # print(curr_positions) for p_x, c_x in zip(prev_positions, curr_positions): p_x = round(p_x) c_x = round(c_x) if abs(p_x - c_x) > 100: # print("False") # print("check_tr_alignment False3") return False # print("check_tr_alignment True") return True def check_layout(prev_line, curr_line, prev_above_curr): prev_line_width = range( int(prev_line.visual_line.min_x), int(prev_line.visual_line.max_x), ) # weird edge case if not prev_line_width: prev_line_width = range( int(prev_line.visual_line.max_x), int(prev_line.visual_line.min_x), ) curr_line_width = range( int(curr_line.visual_line.min_x), int(curr_line.visual_line.max_x), ) prev_line_width = set(prev_line_width) prev_curr_overlap = prev_line_width.intersection(curr_line_width) if prev_curr_overlap and not prev_above_curr: # print(prev_line.text) # print(curr_line.text) # print("misplaced text group") # print() return True return False def order_blocks(blocks): block_group_dict = defaultdict(list) for idx, block in enumerate(blocks): # print(idx, "block-group", block["group_id"], block["block_type"], block['block_text']) group_id = block["group_id"] block_group_dict[group_id].append(block) block_group_list = [] # list that holds tuples (group_id, y_pos) for block_group_id in block_group_dict: block_group_list.append( (block_group_id, block_group_dict[block_group_id][0]["y"]), ) # append starting y 
position of group block_group_list = sorted( block_group_list, key=lambda x: x[1], ) # sort block groups by y position # get list of ordered block group keys ordered_blocks = [] for block_group_id, y in block_group_list: ordered_blocks += block_group_dict[block_group_id] # for b in original_blocks: # re-index blocks and headers based off of new ordering header_idx = 0 for idx, block in enumerate(ordered_blocks): block["block_idx"] = idx if block["block_type"] == "header": header_idx = idx ordered_blocks[idx]["header_block_idx"] = header_idx return ordered_blocks def visual_clean_lines( lines, page_stats={}, page_info_dict={}, page_idx=0, line_set={}, ): page_blocks = [] header_block_idx = -1 block_idx = 0 # block_idx = page_idx style_dict = {} join_font_spacing = False prev_line = None text_list = [] prev_ents = 0 curr_ents = 0 is_incomplete = False colon_rule = False text_group_start = True text_group_start_idx = 0 prev_line = None next_line = None # for idx, line in enumerate(lines[12:14]): sentence_visual_end = False group_id = 0 for idx, line in enumerate(lines): # print(idx) line_str, style_dict, text_list = ( line["text"], line["style"], line["text_list"], ) line_str = " ".join(line_str.split()) if should_skip(line_str): continue if line_str in line_set: continue if len(line_str.split()) > 8: line_set.add(line_str) curr_line = line_parser.Line( line_str=line_str, style_dict=style_dict, text_list=text_list, page_details=page_stats, ) if prev_line is None: # initialize memory of previous line. 
# this will update with join decisions list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "list_char": list_char, "fs": curr_line.visual_line.start_fs, "text_group_start_idx": text_group_start_idx, "block_list": curr_line.visual_line.text_list, "line": curr_line, "y": curr_line.visual_line.start_y, "group_id": group_id, } prev_line = curr_line block_idx += 1 # if (idx <= 3) or (idx >= len(lines) - 3): # line_without_numbers = re.sub(r"[^a-zA-Z]+", "", line_str).strip() # if line_without_numbers: # # track block_idx for de-duplication # line_set[line_without_numbers].append((page_idx, block_idx)) page_blocks.append(block) continue # print("--" * 50) # print(prev_line.line_type, "\n", prev_line.text) # print(prev_ents) # print(prev_line.visual_line.fw_list) # print(prev_line.visual_line.font_family) # print(prev_line.visual_line.fs, prev_line.visual_line.fw, "prev_line:", prev_line.line_type, prev_line.text) # print(prev_line.visual_line.mode_fs) # print(curr_line.line_type, "\n", curr_line.text) # print(curr_ents) # print() # print(curr_line.visual_line.font_family) # print(curr_line.visual_line.mode_fs) # print(curr_line.visual_line.fs, curr_line.visual_line.fw, "curr_line:", curr_line.line_type, curr_line.text) if ( len(prev_line.text) > 1 and len(curr_line.text) > 1 and prev_line.text[:2] == curr_line.text[:2] and prev_line.text[1] == " " and not (prev_line.text[0].isdigit() or curr_line.text[0].isdigit()) and not (prev_line.text[0].isalpha() or curr_line.text[0].isalpha()) ): curr_line.line_type = "list_item" curr_line.is_list_item = True curr_line.is_list_or_row = True if page_blocks[-1]["block_type"] != "list_item": 
page_blocks[-1]["block_type"] = "list_item" page_blocks[-1]["list_char"] = page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() same_start_fs = ( abs(prev_line.visual_line.start_fs - curr_line.visual_line.start_fs) < 0.5 ) same_end_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.end_fs) < 0.5 ) same_end_start_fs = ( abs(prev_line.visual_line.end_fs - curr_line.visual_line.start_fs) < 0.5 ) prev_above_curr = ( True if prev_line.visual_line.end_y < curr_line.visual_line.start_y else False ) y_diff = curr_line.visual_line.start_y - prev_line.visual_line.start_y top_overlap = compute_overlap_top_bottom( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) bottom_overlap = compute_bottom_top_overlap( start_x0=prev_line.visual_line.start_x, end_x0=prev_line.visual_line.end_x, start_x1=curr_line.visual_line.start_x, end_x1=curr_line.visual_line.end_x, ) prev_overlap_curr = True if bottom_overlap or top_overlap else False use_visual_join = True if prev_above_curr and prev_overlap_curr else False if not use_visual_join and prev_line.incomplete_line: join_font_spacing = True if not (prev_line.is_table_row or curr_line.is_table_row): if page_stats["n_lines"] <= 3: join_font_spacing = True else: join_font_spacing = check_page_spacing( prev_line, curr_line, page_stats["fs_and_diff_next_y"], ) # if the font is different and font-family is different different_font_family = ( curr_line.visual_line.font_family != prev_line.visual_line.font_family ) different_common_fs = ( prev_line.visual_line.mode_fs != curr_line.visual_line.mode_fs and prev_line.visual_line.start_fs != curr_line.visual_line.start_fs ) different_font = ( different_font_family and different_common_fs and not join_font_spacing ) # start and end characters are same font or the mode of fonts of both lines is the same same_font = ( 
(prev_line.visual_line.fs == curr_line.visual_line.fs) or (same_start_fs and same_end_fs) or same_end_start_fs or prev_line.visual_line.mode_fs == curr_line.visual_line.mode_fs ) and not different_font prev_ents = ( len(prev_line.visual_line.text_list) if not prev_line.line_type == "list_item" else 0 ) curr_ents = ( len(curr_line.visual_line.text_list) if not curr_line.is_list_item else 0 ) ents_aligned = check_tr_alignment(prev_line, curr_line) is_incomplete_sent = ( prev_line.incomplete_line and not prev_line.ends_with_period or prev_line.ends_with_comma ) # logic using line after curr if idx + 1 < len(lines): # this is inefficent as line_parser is called twice, # once for next_line and once for curr_line. next_line = lines[idx + 1] # print("NEXT LINE\n", next_line['text']) next_line_str, next_style_dict, next_text_list = ( next_line["text"], next_line["style"], next_line["text_list"], ) next_line = line_parser.Line( line_str=next_line_str, style_dict=next_style_dict, text_list=next_text_list, page_details=page_stats, ) # if the last line was not a table, check if the next line is a table to avoid single tr if prev_line.line_type != "table_row" and not ents_aligned: # check if the next line is a table and matches curr_line next_line_tr = next_line.line_type == "table_row" or should_join_table( curr_line, next_line, False, ) if not next_line_tr and curr_line.line_type == "table_row": curr_line.line_type = "para" # if the next line is joinable by visual stats but prev and curr are not # don't join the line (only true by x-span check and y is below for prev cur) # if this is not true ignore the rule prev_not_above_next = ( next_line and prev_line.visual_line.start_y > next_line.visual_line.start_y ) next_line_join = False if next_line and check_layout(prev_line, next_line, prev_not_above_next): next_line_join = check_page_spacing( curr_line, next_line, page_stats["fs_and_diff_next_y"], ) # if the prev line is not visually joinable and the curr_next is # make sure 
the prev_line doesn't join the curr_line curr_next_visual_join = not join_font_spacing and next_line_join # print() # print("is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line") # print(is_incomplete_sent, (join_font_spacing and not sentence_visual_end), curr_line.continuing_line) # print("join_font_spacing:,", join_font_spacing) is_incomplete = ( is_incomplete_sent or (join_font_spacing and not sentence_visual_end) or curr_line.continuing_line ) # print("is_incomplete", is_incomplete) has_overlap_with_min = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=True, ) > 0.7 ) is_below = curr_line.visual_line.start_y - prev_line.visual_line.start_y > 0 is_visually_apart = (has_overlap_with_min and not is_below) or ( not has_overlap_with_min and is_below ) above_bold_below_not = ( prev_line.visual_line.fw >= 600.0 and curr_line.visual_line.fw <= 400.0 ) has_overlap_with_max = ( compute_overlap( curr_line.visual_line.start_x, curr_line.visual_line.end_x, prev_line.visual_line.start_x, prev_line.visual_line.end_x, divide_by_min=False, ) > 0.3 ) is_not_header_over_para = True if ( above_bold_below_not and not has_overlap_with_max and prev_line.line_type == "header" and not prev_line.incomplete_line ): is_not_header_over_para = False # print("header over para check") # print("""above_bold_below_not # and not has_overlap_with_max # and prev_line.line_type == "header" # """) # print(above_bold_below_not) # print(has_overlap_with_max, j) # print(prev_line.line_type == "header") # print() # print(is_not_header_over_para) ########### # List item if line_list_check(prev_line, curr_line, page_blocks[-1]["list_char"]): prev_line.line_type = "list_item" curr_line.line_type = "list_item" curr_line.is_list_item = True # change prev_line to list item if page_blocks[-1]["block_type"] != "list_item": page_blocks[-1]["list_char"] = 
page_blocks[-1]["block_text"][0] page_blocks[-1]["block_text"] = page_blocks[-1]["block_text"][ 1: ].lstrip() page_blocks[-1]["block_type"] = "list_item" close_text_y = ( curr_line.visual_line.start_y - curr_line.visual_line.mode_fs - prev_line.visual_line.start_y - prev_line.visual_line.mode_fs ) <= 0 aligned_text = curr_line.visual_line.start_x == prev_line.visual_line.start_x title_text = False if len(lines) < 10: title_text = top_overlap == 1.0 and close_text_y and aligned_text visual_header = visual_header_check(prev_line, curr_line, same_font) list_item_rule = curr_line.has_list_char or ( curr_line.numbered_line and not ( (prev_line.incomplete_line and curr_line.continuing_line) or join_font_spacing ) ) last_2_block_tr = False if len(page_blocks) >= 2: last_block_tr = ( page_blocks[-1]["block_type"] == "table_row" and page_blocks[-2]["block_type"] == "table_row" ) if not last_block_tr and curr_line.line_type == "para": # check to join if prev_line.incomplete_line and curr_line.continuing_line: last_2_block_tr = True no_space_join = prev_line.ends_with_period and curr_line.text[0] != " " visual_header_by_stats = visual_header_from_stats( prev_line, curr_line, page_stats, ) header_join = False common_list = curr_line.has_list_char or prev_line.has_list_char if ( visual_header_by_stats and curr_line.incomplete_line and same_font and not (prev_line.is_table_row or curr_line.is_table_row or common_list) ): header_join = True # print("LINEJOIN CHECK") # print("positive\n", "*" * 10) # print(f"\nsame_font:{same_font}", # f"\nis_incomplete:{is_incomplete}", # f"\nis_not_header_over_para:{is_not_header_over_para}") # print("join_font_spacing", join_font_spacing) # print("header join", header_join) # print() # print("negative\n", "*" * 10) # print(f"\nis_visually_apart:{is_visually_apart}", # f"\nshould_join_table(prev_line, curr_line): {should_join_table(prev_line, curr_line, ents_aligned)}", # f"\ncurr_line.is_list_or_row:{curr_line.is_list_or_row}", # f"\ncurr_line 
table {curr_line.line_type == 'table_row'}", # f"\ncurr_line list {curr_line.is_list_item}", # f"\nvisual_header {visual_header}", # f'\nprev_line.line_type == "table_row", {prev_line.line_type == "table_row"}') if ( same_font and not should_join_table(prev_line, curr_line, ents_aligned) and not (curr_line.line_type == "table_row" or list_item_rule) and not (prev_line.line_type == "table_row" and not last_2_block_tr) and is_incomplete and not curr_next_visual_join # is_visually_apart and not visual_header or not check_parentheses(prev_line.text) and is_not_header_over_para and not no_space_join or title_text or header_join ): # print("JOIN") if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False if page_stats["n_lines"] <= 3: page_blocks[-1]["block_type"] = "header" elif ( not prev_line.line_type == "list_item" ): # and not curr_line.visual_line.is_header: page_blocks[-1]["block_type"] = "para" new_text = formatter.connect( prev_line.text.rstrip(), curr_line.text.lstrip(), ) new_text_list = ( prev_line.visual_line.text_list + curr_line.visual_line.text_list ) # print("Max ex min ex assignment") max_x = max(prev_line.visual_line.max_x, prev_line.visual_line.max_x) min_x = min(prev_line.visual_line.min_x, curr_line.visual_line.min_x) prev_line_type = prev_line.line_type page_blocks[-1]["block_text"] = new_text prev_start_y = prev_line.visual_line.start_y curr_start_y = curr_line.visual_line.start_y prev_end_y = prev_line.visual_line.end_y wrapped_page = prev_line.visual_line.wrapped_page # pass the line parser attributes prev_line = curr_line # add appended text and text_list, preserve the line type prev_line.text = new_text prev_line.visual_line.start_y = prev_start_y prev_line.visual_line.text_list = new_text_list prev_line.line_type = prev_line_type prev_line.visual_line.min_x = min_x prev_line.visual_line.max_x = max_x prev_line.visual_line.wrapped_page = wrapped_page 
if curr_start_y < prev_end_y: prev_line.visual_line.wrapped_page = True # print(prev_start_y) # print("Join") # print() # print("-" * 50) # print() # new block else: # print("NEW block") # print("*" * 50) if not is_visually_apart and bottom_overlap < 0.5: # this would signify end of paragraph sentence_visual_end = True else: sentence_visual_end = False # print("-"*50) colon_rule = ( prev_line.hit_colon and curr_line.hit_colon and prev_ents == curr_ents ) # normal case tab_check_join = { prev_line.visual_line.tab_count_join, prev_line.visual_line.tab_count, } & {curr_line.visual_line.tab_count_join, curr_line.visual_line.tab_count} tab_check = sum(tab_check_join) > 0 # print("-+" * 50) # print("TAB POSITIONS") # print(prev_line.text) # print(prev_line.visual_line.start_x_list) # print(prev_line.visual_line.start_x_list_single_ent) # print(prev_line.visual_line.tab_count) # print(prev_line.visual_line.tab_count_join) # # print(curr_line.text) # print(curr_line.visual_line.start_x_list) # print(curr_line.visual_line.start_x_list_single_ent) # print(curr_line.visual_line.tab_count) # print(curr_line.visual_line.tab_count_join) # print("tabcheck", tab_check) # print("ents_aligned", ents_aligned) # print(prev_ents, curr_ents) # print(curr_line.visual_line.text_list) # print("-+" * 50) if visual_header_by_stats and prev_line.line_type != "table_row": page_blocks[-1]["block_type"] = "header" elif ( colon_rule and prev_ents == 1 and prev_line.line_type != "list_item" and not (prev_line.incomplete_line and curr_line.continuing_line) ): # print("Table Conversion") # print() # print("colon check") # print(prev_line.text.split(":")) # print(curr_line.text.split(":")) # print("TR1") new_text_list = prev_line.text.split(":") new_text_list = [new_text_list[0] + ":", new_text_list[1:]] page_blocks[-1]["block_type"] = "table_row" page_blocks[-1]["block_list"]: new_text_list if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] 
page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" curr_line.is_list_or_row = True # print("Table Conversion!") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR3") elif ( tab_check and ents_aligned and prev_line.line_type != "list_item" ) or (colon_rule and not prev_line.incomplete_line): # print("Table Conversion") # print(prev_ents, curr_ents) # print(page_blocks[-1]["block_text"]) # print("TR2") page_blocks[-1]["block_type"] = "table_row" if text_group_start: text_group_start = False text_group_start_idx = page_blocks[-1]["block_idx"] page_blocks[-1]["text_group_start_idx"] = text_group_start_idx curr_line.line_type = "table_row" else: text_group_start = True text_group_start_idx = -1 list_char = "" if curr_line.line_type == "list_item": list_char = curr_line.text[0] curr_line.text = curr_line.text[1:].lstrip() if curr_line.line_type == "header": header_block_idx = block_idx if (visual_header or visual_header_by_stats) and not ( prev_line.line_type == "list_item" or prev_line.line_type == "numbered_list_item" ): page_blocks[-1]["block_type"] = "header" # print() # print("*" * 40) # print("NEW BLOCK") # print() # print("*" * 40) # print(curr_line.line_type, curr_line.text) # group attribute if check_layout(prev_line, curr_line, prev_above_curr) or y_diff < 0: group_id += 1 block = { "block_idx": block_idx, "block_text": curr_line.text, "block_type": curr_line.line_type, "header_block_idx": header_block_idx, "block_group": [curr_line.visual_line.text_list], "text_group_start_idx": text_group_start_idx, "list_char": list_char, "group_id": group_id, "fs": curr_line.visual_line.start_fs, "x": curr_line.visual_line.start_x, "y": curr_line.visual_line.start_y, "line": curr_line, "block_list": curr_line.visual_line.text_list, } # This is to account for when the headers get false positive #TODO improve header code prev_text = page_blocks[-1]["block_text"] if page_blocks[-1]["block_type"] == 
"header" and ( len(sent_tokenize(prev_text)) >= 2 or len(prev_text.split()) > 16 ): page_blocks[-1]["block_type"] = "para" prev_line = curr_line block_idx += 1 page_blocks.append(block) # not too many blocks there may be title text missed if len(page_blocks) <= 2: for idx, block in enumerate(page_blocks): if "." not in block["block_text"] and len(block["block_text"].split()) < 10: page_blocks[idx]["block_type"] = "header" page_blocks = order_blocks(page_blocks) return page_blocks, line_set def clean_line(line): line = line.replace("\n", " ") line = line.replace("\t", " ") line = line.strip() return line def fix_spaced_characters(line_text): line_text = re.sub(r"\s+", "", line_text) return su.segment(line_text) def connect(prev, curr): has_space = prev.endswith(" ") result = prev + ("" if has_space else " ") + curr return result def get_numbers(line): # test = re.compile(r"[0-9]+\.?[0-9]?") regex = re.compile(r"\$?(\d*(\d\.?|\.\d{1,2}))$") return regex.search(line) def check_block_join(prev_block, block): prev_text = prev_block["block_text"] curr_text = block["block_text"] blocks_are_paras = ( prev_block["block_type"] == "para" and block["block_type"] == "para" ) if len(prev_text.strip()) and len(curr_text.strip()) and blocks_are_paras: prev_line = line_parser.Line(prev_block["block_text"]) curr_line = line_parser.Line(block["block_text"]) if prev_line.incomplete_line or curr_line.continuing_line: return True return False def join_blocks(page_blocks, blocks): prev_last_block = page_blocks[-1][-1] # update page blocks and blocks # prev_blocks = page_blocks[-1] # last_prev_block = prev_blocks[-1] # check to join last_prev_block with first blocks[0] # if it's a join, pop the block and join, subtract block indexes prev_last_block["block_text"] = ( prev_last_block["block_text"].rstrip() + " " + blocks[0]["block_text"].lstrip() ) prev_last_block["block_list"].append(blocks[0]["block_list"]) # print(prev_block) page_blocks[-1][-1] = prev_last_block for block in blocks[1:]: 
block["block_idx"] -= 1 return page_blocks, blocks[1:]
evocodebench_data_266
import json
import re

import numpy as np
from nltk import load
from nltk import PunktSentenceTokenizer

# Abbreviations NLTK's pre-trained English Punkt model already knows about.
nltk_abbs = load("tokenizers/punkt/{}.pickle".format("english"))._params.abbrev_types


class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars/arrays to native Python types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(NpEncoder, self).default(obj)


# Extra abbreviations (addresses, legal/financial shorthand) that should not
# end a sentence even when followed by a period.
nlm_abbs = {
    "u.s",
    "u.s.a",
    "n.w",
    "p.o",
    "po",
    "st",
    "ave",
    "blvd",
    "ctr",
    "cir",
    "ct",
    "dr",
    "mtn",
    "apt",
    "hwy",
    "esq",
    "fig",
    "no",
    "sec",
    "n.a",
    "s.a.b",
    "non-u.s",
    "cap",
    'u.s.c',
    "ste",
}

# Abbreviations masked wherever they appear, even without trailing whitespace.
nlm_special_abbs = {
    "inc",
}

abbs = nltk_abbs | nlm_abbs

nltk_tokenzier = PunktSentenceTokenizer()

# (pattern, replacement) pairs that mask "<abb>." as "<abb>_" before sentence
# splitting. Each replacement has the SAME length as the matched text, so
# character offsets into the original string stay valid.
rules = []

for abb in abbs:
    # match start of the sentence
    # NOTE(review): the `.` after {abb} is unescaped, so it matches ANY
    # character (e.g. "no" also masks "not "); dots inside abb itself are
    # likewise unescaped. Confirm whether re.escape(abb) + r"\." was intended.
    pattern = fr"^{abb}.\s"
    replaced = f"{abb}_ "
    # case insensitive replacement for synonyms
    rule = re.compile(pattern, re.IGNORECASE)
    rules.append((rule, replaced))

    # match token in sentence
    pattern = fr"\s{abb}.\s"
    replaced = f" {abb}_ "

    # case insensitive replacement for synonyms
    rule = re.compile(pattern, re.IGNORECASE)
    rules.append((rule, replaced))

for abb in nlm_special_abbs:
    pattern = fr"{abb}\."
replaced = f"{abb}_" rule = re.compile(pattern, re.IGNORECASE) rules.append((rule, replaced)) # match content inside brackets # (?<=\() ==> starts with "(" # ([^)]+) ==> repeat not ")" # (?=\))") ==> ends with ")" bracket_rule = re.compile(r"(?<=\()([^)]+)(?=\))") space_rule = re.compile(r"\s([.'](?:\s|$|\D))", re.IGNORECASE) # Remove any space between punctuations (.') quotation_pattern = re.compile(r'[”“"‘’\']') def sent_tokenize(org_texts): if not org_texts: return org_texts sents = [] # in case org_texts has \n, break it into multiple paragraph # edge case for html and markdown for org_text in org_texts.split("\n"): org_text = space_rule.sub(r'\1', org_text) modified_text = re.sub(r'^([.,?!]\s+)+', "", org_text) # To handle bug https://github.com/nltk/nltk/issues/2925 orig_offset = abs(len(org_text) - len(modified_text)) # do not break bracket for span_group in bracket_rule.finditer(modified_text): start_byte, end_byte = span_group.span() span = modified_text[start_byte:end_byte] # skip this logic when span is too big? disabled for now # if len(span.split()) >= 10: # continue modified_text = modified_text.replace( f"({span})", f"_{span.replace('.','_')}_", ) for rule, replaced in rules: modified_text = rule.sub(replaced, modified_text) # Normalize all the quotation. 
modified_text = quotation_pattern.sub("\"", modified_text) modified_sents = nltk_tokenzier.tokenize(modified_text) offset = orig_offset sent_idx = 0 while offset < len(modified_text) and sent_idx < len(modified_sents): if modified_text[offset] == " ": offset += 1 continue # cut org_text based on lengths of modified_sent modified_sent = modified_sents[sent_idx] sents.append(org_text[offset: offset + len(modified_sent)]) offset += len(modified_sent) sent_idx += 1 if len(sents) >= 2 and re.match(r"^.\.$", sents[0]): sents[1] = sents[0] + " " + sents[1] sents = sents[1:] return sents def divide_list_into_chunks(lst, n): # looping till length l for i in range(0, len(lst), n): yield lst[i : i + n] def normalize(X): norms = np.einsum("ij,ij->i", X, X) np.sqrt(norms, norms) X /= norms[:, np.newaxis] return X def detect_block_center_aligned(block, page_width): center_location = block["box_style"][1] + block["box_style"][3] / 2 center_aligned = abs(center_location - page_width / 2) < page_width * 0.01 width_check = block["box_style"][3] * 2 < page_width return center_aligned and width_check def detect_block_center_of_page(block, page_height): bottom = block["box_style"][0] + block["box_style"][4] center_of_page = (page_height / 3) <= bottom <= ((2 * page_height) / 3) return center_of_page def check_char_is_word_boundary(c): if c.isalnum(): return False if c in ['-', '_']: return False return True def blocks_to_sents(blocks, flatten_merged_table=False, debug=False): block_texts = [] block_info = [] header_block_idx = -1 header_match_idx = -1 header_match_idx_offset = -1 header_block_text = "" is_rendering_table = False is_rendering_merged_cells = False table_idx = 0 levels = [] prev_header = None block_idx = 0 for block_idx, block in enumerate(blocks): block_type = block["block_type"] if block_type == "header": if debug: print("---", block["level"], block["block_text"]) header_block_text = block["block_text"] header_block_idx = block["block_idx"] header_match_idx = 
header_match_idx_offset + 1 if prev_header and block["level"] <= prev_header['level'] and len(levels) > 0: while len(levels) > 0 and levels[-1]["level"] >= block["level"]: if debug: print("<<", levels[-1]["level"], levels[-1]["block_text"]) levels.pop(-1) if debug: print(">>", block["block_text"]) levels.append(block) prev_header = block if debug: print("-", [str(level['level']) + "-" + level['block_text'] for level in levels]) block["header_text"] = header_block_text block["header_block_idx"] = header_block_idx block["header_match_idx"] = header_match_idx block["block_idx"] = block_idx level_chain = [] for level in levels: level_chain.append({"block_idx": level["block_idx"], "block_text": level["block_text"]}) # remove a level for header if block_type == "header": level_chain = level_chain[:-1] level_chain.reverse() block["level_chain"] = level_chain # if block_type == "header" or block_type == "table_row": if ( block_type == "header" and not is_rendering_table and 'is_table_start' not in block ): block_texts.append(block["block_text"]) # append text from next block to header block # TODO: something happened here, it messed up the match_text # if block_type == "header" and block_idx + 1 < len(blocks): # block[ # "block_text" # ] += blocks[block_idx+1]['block_text'] block_info.append(block) header_match_idx_offset += 1 elif ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" ) and not is_rendering_table: block_sents = block["block_sents"] header_match_idx_offset += len(block_sents) for sent in block_sents: block_texts.append(sent) block_info.append(block) elif 'is_table_start' in block: is_rendering_table = True if 'has_merged_cells' in block: is_rendering_merged_cells = True elif 'is_table_start' not in block and not is_rendering_table and block_type == "table_row": block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if is_rendering_table: if is_rendering_merged_cells and 
"effective_para" in block and flatten_merged_table: eff_header_block = block["effective_header"] eff_para_block = block["effective_para"] eff_header_block["header_text"] = block["header_text"] eff_header_block["header_block_idx"] = block["block_idx"] eff_header_block["header_match_idx"] = header_match_idx_offset + 1 eff_header_block["level"] = block["level"] + 1 eff_header_block["level_chain"] = block["level_chain"] eff_para_block["header_block_idx"] = block["block_idx"] eff_para_block["header_match_idx"] = header_match_idx_offset + 1 eff_para_block["level"] = block["level"] + 2 eff_para_block["level_chain"] = [ { "block_idx": eff_header_block["block_idx"], "block_text": eff_header_block["block_text"], }, ] + eff_header_block["level_chain"] header_match_idx_offset += 1 block_info.append(block["effective_header"]) block_texts.append(block["effective_header"]["block_text"]) for sent in block["effective_para"]["block_sents"]: block_texts.append(sent) block_info.append(block["effective_para"]) header_match_idx_offset += len(block["effective_para"]["block_sents"]) else: block["table_idx"] = table_idx block_info.append(block) block_texts.append(block["block_text"]) header_match_idx_offset += 1 if 'is_table_end' in block: is_rendering_table = False table_idx += 1 return block_texts, block_info def get_block_texts(blocks): block_texts = [] block_info = [] for block in blocks: block_type = block["block_type"] if ( block_type == "list_item" or block_type == "para" or block_type == "numbered_list_item" or block_type == "header" ): block_texts.append(block["block_text"]) block_info.append(block) return block_texts, block_info
evocodebench_data_267
"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take import json from collections import Counter import warnings import logging from typing import List, Union, Optional, Iterable import numpy as np from searcharray.phrase.scan_merge import scan_merge_ins from searcharray.phrase.posn_diffs import compute_phrase_freqs from searcharray.phrase.middle_out import PosnBitArray from searcharray.similarity import Similarity, default_bm25 from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list from searcharray.term_dict import TermMissingError logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) class Terms: """An indexed search doc - a single bag of tokenized words and positions.""" def __init__(self, postings, doc_len: int = 0, posns: Optional[dict] = None, encoded=False): self.postings = postings self.posns = None self.encoded = encoded self.doc_len = doc_len self.posns = posns def _validate_posns(self): # (For testing/assertions) - Confirm every term in positions also in postings if self.posns is None: return for term in self.posns: if term not in self.postings: raise ValueError(f"Term {term} in positions but not in postings. 
") def termfreq(self, token): return self.postings[token] def terms(self): return self.postings.items() def positions(self, term=None): if self.posns is None: return {} if term is None: posns = self.posns.items() else: posns = self.posns[term] return posns def raw_positions(self, term_dict, term=None): if self.posns is None: return {} if term is None: posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()] else: posns = [(term_dict.get_term_id(term), self.posns[term])] return posns def tf_to_dense(self, term_dict): """Convert to a dense vector of term frequencies.""" dense = np.zeros(len(term_dict)) for term, freq in self.terms(): dense[term_dict.get_term_id(term)] = freq return dense def __len__(self): return len(self.postings) def __repr__(self): posting_keys = set(self.postings.keys()) rval = f"Terms({posting_keys})" return rval def __str__(self): return repr(self) def __eq__(self, other): # Flip to the other implementation if we're comparing to a SearchArray # to get a boolean array back if isinstance(other, SearchArray): return other == self same_postings = isinstance(other, Terms) and self.postings == other.postings if same_postings and self.doc_len == other.doc_len: return True def __lt__(self, other): # return isinstance(other, Terms) and hash(self) < hash(other) keys_both = set(self.postings.keys()).union(set(other.postings.keys())) # Sort lexically keys_both = sorted(keys_both) # Iterate as if these are two vectors of the same large dimensional vector sparse for key in keys_both: lhs_val = 0 rhs_val = 0 try: lhs_val = self.postings[key] except KeyError: pass try: rhs_val = other.postings[key] except KeyError: pass if lhs_val < rhs_val: return True elif lhs_val > rhs_val: return False else: continue return False def __le__(self, other): return self < other or self == other def __gt__(self, other): return not (self < other) and self != other def __hash__(self): return hash(json.dumps(self.postings, sort_keys=True)) class 
TermsDtype(ExtensionDtype): """Pandas dtype for terms.""" name = 'tokenized_text' type = Terms kind = 'O' @classmethod def construct_from_string(cls, string): if not isinstance(string, str): raise TypeError( "'construct_from_string' expects a string, got {}".format(type(string)) ) elif string == cls.name: return cls() else: raise TypeError( "Cannot construct a '{}' from '{}'".format(cls.__name__, string) ) @classmethod def construct_array_type(cls): return SearchArray def __repr__(self): return 'TermsDtype()' @property def na_value(self): return Terms({}) def valid_value(self, value): return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms) register_extension_dtype(TermsDtype) def ws_tokenizer(string): if pd.isna(string): return [] if not isinstance(string, str): raise ValueError("Expected a string") return string.split() def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray): tfs = {} labeled_posns = {} for term_idx in row.cols: term = term_dict.get_term(term_idx) tfs[term] = 1 enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id) labeled_posns[term] = enc_term_posns result = Terms(tfs, posns=labeled_posns, doc_len=doc_len, encoded=True) return result class SearchArray(ExtensionArray): """An array of tokenized text (Termss).""" dtype = TermsDtype() def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True): # Check dtype, raise TypeError if not is_list_like(postings): raise TypeError("Expected list-like object, got {}".format(type(postings))) self.avoid_copies = avoid_copies self.tokenizer = tokenizer self.term_mat, self.posns, \ self.term_dict, self.avg_doc_length, \ self.doc_lens = build_index_from_terms_list(postings, Terms) @classmethod def index(cls, array: Iterable, tokenizer=ws_tokenizer, truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray': """Index an array of strings using tokenizer.""" if not is_list_like(array): raise TypeError("Expected list-like object, got 
{}".format(type(array))) term_mat, posns, term_dict, avg_doc_length, doc_lens =\ build_index_from_tokenizer(array, tokenizer, batch_size=batch_size, truncate=truncate) postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies) postings.term_mat = term_mat postings.posns = posns postings.term_dict = term_dict postings.avg_doc_length = avg_doc_length postings.doc_lens = doc_lens return postings @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into).""" if dtype is not None: if not isinstance(dtype, TermsDtype): return scalars if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype(): return cls(scalars) # String types elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US': return cls(scalars) # Other objects elif isinstance(scalars, np.ndarray) and scalars.dtype != object: return scalars return cls(scalars) def memory_usage(self, deep=False): """Return memory usage of this array in bytes.""" return self.nbytes @property def nbytes(self): return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes def __getitem__(self, key): key = pd.api.indexers.check_array_indexer(self, key) # Want to take rows of term freqs if isinstance(key, numbers.Integral): try: rows = self.term_mat[key] doc_len = self.doc_lens[key] doc_id = key if doc_id < 0: doc_id += len(self) return _row_to_postings_row(doc_id, rows[0], doc_len, self.term_dict, self.posns) except IndexError: raise IndexError("index out of bounds") else: # Construct a sliced view of this array sliced_tfs = self.term_mat.slice(key) sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns arr = SearchArray([], tokenizer=self.tokenizer) arr.term_mat = sliced_tfs arr.doc_lens = self.doc_lens[key] arr.posns = sliced_posns arr.term_dict = self.term_dict arr.avg_doc_length = self.avg_doc_length return arr def __setitem__(self, 
key, value): """Set an item in the array.""" key = pd.api.indexers.check_array_indexer(self, key) if isinstance(value, pd.Series): value = value.values if isinstance(value, pd.DataFrame): value = value.values.flatten() if isinstance(value, SearchArray): value = value.to_numpy() if isinstance(value, list): value = np.asarray(value, dtype=object) if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value): raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}") # Cant set a single value to an array if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray): raise ValueError("Cannot set a single value to an array") try: is_encoded = False posns = None term_mat = np.asarray([]) doc_lens = np.asarray([]) if isinstance(value, float): term_mat = np.asarray([value]) doc_lens = np.asarray([0]) elif isinstance(value, Terms): term_mat = np.asarray([value.tf_to_dense(self.term_dict)]) doc_lens = np.asarray([value.doc_len]) is_encoded = value.encoded posns = [value.raw_positions(self.term_dict)] elif isinstance(value, np.ndarray): term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value]) doc_lens = np.asarray([x.doc_len for x in value]) is_encoded = value[0].encoded if len(value) > 0 else False posns = [x.raw_positions(self.term_dict) for x in value] np.nan_to_num(term_mat, copy=False, nan=0) self.term_mat[key] = term_mat self.doc_lens[key] = doc_lens if posns is not None: self.posns.insert(key, posns, is_encoded) # Assume we have a positions for each term, doc pair. We can just update it. # Otherwise we would have added new terms except TermMissingError: self._add_new_terms(key, value) def _add_new_terms(self, key, value): msg = """Adding new terms! This might not be good if you tokenized this new text with a different tokenizer. Also. 
This is slow.""" warnings.warn(msg) scan_value = value if isinstance(value, Terms): scan_value = np.asarray([value]) for row in scan_value: for term in row.terms(): self.term_dict.add_term(term[0]) self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict))) # Ensure posns_lookup has at least max self.posns self[key] = value def value_counts( self, dropna: bool = True, ): if dropna: counts = Counter(self[:]) counts.pop(Terms({}), None) else: counts = Counter(self[:]) return pd.Series(counts) def __len__(self): len_rval = len(self.term_mat.rows) return len_rval def __ne__(self, other): if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented return ~(self == other) def __eq__(self, other): """Return a boolean numpy array indicating elementwise equality.""" # When other is a dataframe or series, not implemented if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented # When other is an ExtensionArray if isinstance(other, SearchArray): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) else: # Compatible term dicts, and same term freqs # (not looking at positions, maybe we should?) if self.term_dict.compatible(other.term_dict): return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens) else: return np.zeros(len(self), dtype=bool) # return np.array(self[:]) == np.array(other[:]) # When other is a scalar value elif isinstance(other, Terms): other = SearchArray([other], tokenizer=self.tokenizer) warnings.warn("Comparing a scalar value to a SearchArray. 
This is slow.") return np.array(self[:]) == np.array(other[:]) # When other is a sequence but not an ExtensionArray # its an array of dicts elif is_list_like(other): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) # We actually don't know how it was tokenized other = SearchArray(other, tokenizer=self.tokenizer) return np.array(self[:]) == np.array(other[:]) # Return False where 'other' is neither the same length nor a scalar else: return np.full(len(self), False) def isna(self): # Every row with all 0s empties = self.doc_lens == 0 return empties def take(self, indices, allow_fill=False, fill_value=None): # Want to take rows of term freqs row_indices = np.arange(len(self.term_mat.rows)) # Take within the row indices themselves result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1) if allow_fill and -1 in result_indices: if fill_value is None or pd.isna(fill_value): fill_value = Terms({}, encoded=True) to_fill_mask = result_indices == -1 # This is slow as it rebuilds all the term dictionaries # on the subsequent assignment lines # However, this case tends to be the exception for # most dataframe operations taken = SearchArray([fill_value] * len(result_indices)) taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy() return taken else: taken = self[result_indices].copy() return taken def copy(self): postings_arr = SearchArray([], tokenizer=self.tokenizer) postings_arr.doc_lens = self.doc_lens.copy() postings_arr.term_mat = self.term_mat.copy() postings_arr.posns = self.posns postings_arr.term_dict = self.term_dict postings_arr.avg_doc_length = self.avg_doc_length if not self.avoid_copies: postings_arr.posns = self.posns.copy() postings_arr.term_dict = self.term_dict.copy() return postings_arr @classmethod def _concat_same_type(cls, to_concat): concatenated_data = np.concatenate([ea[:] for ea in to_concat]) return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer) 
    @classmethod
    def _from_factorized(cls, values, original):
        return cls(values)

    def _values_for_factorize(self):
        """Return an array and missing value suitable for factorization (ie grouping)."""
        arr = np.asarray(self[:], dtype=object)
        return arr, Terms({})

    def _check_token_arg(self, token):
        # Normalize the token argument: a 1-element list collapses to its
        # single string; longer lists are treated as a phrase.
        if isinstance(token, str):
            return token
        elif isinstance(token, list) and len(token) == 1:
            return token[0]
        elif isinstance(token, list):
            return token
        else:
            raise TypeError("Expected a string or list of strings for phrases")

    # ***********************************************************
    # Search functionality
    # ***********************************************************
    def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
        """Per-document frequency of `token` (phrase frequency when a list)."""
        token = self._check_token_arg(token)
        if isinstance(token, list):
            return self.phrase_freq(token)

        try:
            term_id = self.term_dict.get_term_id(token)
            matches = np.zeros(len(self), dtype=int)
            slice_of_rows = None
            if self.term_mat.subset:
                # Sliced view: only ask the positions index about our rows,
                # then scatter results back to this view's row order.
                slice_of_rows = self.term_mat.rows
                doc_ids, termfreqs = self.posns.termfreqs(term_id,
                                                          doc_ids=slice_of_rows)
                mask = np.isin(self.term_mat.rows, doc_ids)
                matches[mask] = termfreqs
                return matches
            else:
                doc_ids, termfreqs = self.posns.termfreqs(term_id,
                                                          doc_ids=slice_of_rows)
                matches[doc_ids] = termfreqs
                return matches
        except TermMissingError:
            # Unknown term matches nothing.
            return np.zeros(len(self), dtype=int)

    def docfreq(self, token: str) -> int:
        """Number of documents containing `token` (0 if unknown)."""
        if not isinstance(token, str):
            raise TypeError("Expected a string")
        # Count number of rows where the term appears
        try:
            return self.posns.docfreq(self.term_dict.get_term_id(token))
        except TermMissingError:
            return 0

    def doclengths(self) -> np.ndarray:
        return self.doc_lens

    def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
        """Return a boolean numpy array indicating which elements contain the given term."""
        # NOTE(review): `slop` is accepted but not forwarded to phrase_freq —
        # phrase matching here always uses phrase_freq's default; confirm intended.
        token = self._check_token_arg(token)
        if isinstance(token, list):
            term_freq = self.phrase_freq(token)
        else:
            term_freq = self.termfreqs(token)
        return term_freq > 0

    def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
        """Score each doc using a similarity function.

        Parameters
        ----------
        token : str or list of str of what to search (already tokenized)
        similarity : How to score the documents. Default is BM25.
        """
        # Get term freqs per token
        token = self._check_token_arg(token)

        # For expensive toknes, we compute doc freq first, so we
        # cache them in the DF cache, to let TF cache know it should be cached
        tokens_l = [token] if isinstance(token, str) else token
        all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])

        tfs = self.termfreqs(token)
        token = self._check_token_arg(token)
        doc_lens = self.doclengths()
        scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
                            doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
                            num_docs=len(self))
        return scores

    def positions(self, token: str, key=None) -> List[np.ndarray]:
        """Return a list of lists of positions of the given term."""
        term_id = self.term_dict.get_term_id(token)
        key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
        posns = self.posns.positions(term_id, doc_ids=key)
        return posns

    def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
        """Return a mask on the postings array indicating which elements contain all terms."""
        masks = [self.match(term) for term in tokens]
        mask = np.ones(len(self), dtype=bool)
        for curr_mask in masks:
            mask = mask & curr_mask
        return mask

    def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
        """Return a mask on the postings array indicating which elements contain all terms."""
        masks = [self.match(term) for term in tokens]
        mask = np.sum(masks, axis=0) >= min_should_match
        return mask

    def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
        """Per-document frequency of the exact phrase `tokens`."""
        # Fast path: adjacent-terms phrase with no repeated token — delegate
        # to the encoded positions index directly.
        if slop == 1 and len(tokens) == len(set(tokens)):
            phrase_freqs = np.zeros(len(self))
            try:
                doc_ids = self.term_mat.rows
                term_ids = [self.term_dict.get_term_id(token) for token in tokens]
                return self.posns.phrase_freqs(term_ids,
                                               doc_ids=doc_ids,
                                               phrase_freqs=phrase_freqs)
            except TermMissingError:
                # Any missing term -> the phrase occurs nowhere.
                return phrase_freqs
        else:
            return self.phrase_freq_every_diff(tokens, slop=slop)

    def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
        # Fallback scan-merge implementation over raw positions.
        if mask is None:
            mask = self.and_query(tokens)

        if np.sum(mask) == 0:
            return mask

        # Gather positions
        posns = [self.positions(token, mask) for token in tokens]
        phrase_freqs = np.zeros(len(self))

        phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
        return phrase_freqs

    def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
        # Vectorized position-diff algorithm; docs still unresolved after all
        # widths (marked -1) fall back to the scan-merge path.
        phrase_freqs = -np.ones(len(self))

        mask = self.and_query(tokens)
        phrase_freqs[~mask] = 0
        if np.sum(mask) == 0:
            return phrase_freqs

        term_posns = [self.positions(term, mask) for term in tokens]
        for width in [10, 20, 30, 40]:
            phrase_freqs[mask] = compute_phrase_freqs(term_posns,
                                                      phrase_freqs[mask],
                                                      slop=slop,
                                                      width=width)

        remaining_mask = phrase_freqs == -1
        if np.any(remaining_mask):
            remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
            phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
        return phrase_freqs
evocodebench_data_268
"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. 
Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) 
term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain
evocodebench_data_269
"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take import json from collections import Counter import warnings import logging from typing import List, Union, Optional, Iterable import numpy as np from searcharray.phrase.scan_merge import scan_merge_ins from searcharray.phrase.posn_diffs import compute_phrase_freqs from searcharray.phrase.middle_out import PosnBitArray from searcharray.similarity import Similarity, default_bm25 from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list from searcharray.term_dict import TermMissingError logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) class Terms: """An indexed search doc - a single bag of tokenized words and positions.""" def __init__(self, postings, doc_len: int = 0, posns: Optional[dict] = None, encoded=False): self.postings = postings self.posns = None self.encoded = encoded self.doc_len = doc_len self.posns = posns def _validate_posns(self): # (For testing/assertions) - Confirm every term in positions also in postings if self.posns is None: return for term in self.posns: if term not in self.postings: raise ValueError(f"Term {term} in positions but not in postings. 
") def termfreq(self, token): return self.postings[token] def terms(self): return self.postings.items() def positions(self, term=None): if self.posns is None: return {} if term is None: posns = self.posns.items() else: posns = self.posns[term] return posns def raw_positions(self, term_dict, term=None): if self.posns is None: return {} if term is None: posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()] else: posns = [(term_dict.get_term_id(term), self.posns[term])] return posns def tf_to_dense(self, term_dict): """Convert to a dense vector of term frequencies.""" dense = np.zeros(len(term_dict)) for term, freq in self.terms(): dense[term_dict.get_term_id(term)] = freq return dense def __len__(self): return len(self.postings) def __repr__(self): posting_keys = set(self.postings.keys()) rval = f"Terms({posting_keys})" return rval def __str__(self): return repr(self) def __eq__(self, other): # Flip to the other implementation if we're comparing to a SearchArray # to get a boolean array back if isinstance(other, SearchArray): return other == self same_postings = isinstance(other, Terms) and self.postings == other.postings if same_postings and self.doc_len == other.doc_len: return True def __lt__(self, other): # return isinstance(other, Terms) and hash(self) < hash(other) keys_both = set(self.postings.keys()).union(set(other.postings.keys())) # Sort lexically keys_both = sorted(keys_both) # Iterate as if these are two vectors of the same large dimensional vector sparse for key in keys_both: lhs_val = 0 rhs_val = 0 try: lhs_val = self.postings[key] except KeyError: pass try: rhs_val = other.postings[key] except KeyError: pass if lhs_val < rhs_val: return True elif lhs_val > rhs_val: return False else: continue return False def __le__(self, other): return self < other or self == other def __gt__(self, other): return not (self < other) and self != other def __hash__(self): return hash(json.dumps(self.postings, sort_keys=True)) class 
TermsDtype(ExtensionDtype): """Pandas dtype for terms.""" name = 'tokenized_text' type = Terms kind = 'O' @classmethod def construct_from_string(cls, string): if not isinstance(string, str): raise TypeError( "'construct_from_string' expects a string, got {}".format(type(string)) ) elif string == cls.name: return cls() else: raise TypeError( "Cannot construct a '{}' from '{}'".format(cls.__name__, string) ) @classmethod def construct_array_type(cls): return SearchArray def __repr__(self): return 'TermsDtype()' @property def na_value(self): return Terms({}) def valid_value(self, value): return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms) register_extension_dtype(TermsDtype) def ws_tokenizer(string): if pd.isna(string): return [] if not isinstance(string, str): raise ValueError("Expected a string") return string.split() def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray): tfs = {} labeled_posns = {} for term_idx in row.cols: term = term_dict.get_term(term_idx) tfs[term] = 1 enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id) labeled_posns[term] = enc_term_posns result = Terms(tfs, posns=labeled_posns, doc_len=doc_len, encoded=True) return result class SearchArray(ExtensionArray): """An array of tokenized text (Termss).""" dtype = TermsDtype() def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True): # Check dtype, raise TypeError if not is_list_like(postings): raise TypeError("Expected list-like object, got {}".format(type(postings))) self.avoid_copies = avoid_copies self.tokenizer = tokenizer self.term_mat, self.posns, \ self.term_dict, self.avg_doc_length, \ self.doc_lens = build_index_from_terms_list(postings, Terms) @classmethod def index(cls, array: Iterable, tokenizer=ws_tokenizer, truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray': """Index an array of strings using tokenizer.""" if not is_list_like(array): raise TypeError("Expected list-like object, got 
{}".format(type(array))) term_mat, posns, term_dict, avg_doc_length, doc_lens =\ build_index_from_tokenizer(array, tokenizer, batch_size=batch_size, truncate=truncate) postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies) postings.term_mat = term_mat postings.posns = posns postings.term_dict = term_dict postings.avg_doc_length = avg_doc_length postings.doc_lens = doc_lens return postings @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into).""" if dtype is not None: if not isinstance(dtype, TermsDtype): return scalars if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype(): return cls(scalars) # String types elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US': return cls(scalars) # Other objects elif isinstance(scalars, np.ndarray) and scalars.dtype != object: return scalars return cls(scalars) def memory_usage(self, deep=False): """Return memory usage of this array in bytes.""" return self.nbytes @property def nbytes(self): return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes def __getitem__(self, key): key = pd.api.indexers.check_array_indexer(self, key) # Want to take rows of term freqs if isinstance(key, numbers.Integral): try: rows = self.term_mat[key] doc_len = self.doc_lens[key] doc_id = key if doc_id < 0: doc_id += len(self) return _row_to_postings_row(doc_id, rows[0], doc_len, self.term_dict, self.posns) except IndexError: raise IndexError("index out of bounds") else: # Construct a sliced view of this array sliced_tfs = self.term_mat.slice(key) sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns arr = SearchArray([], tokenizer=self.tokenizer) arr.term_mat = sliced_tfs arr.doc_lens = self.doc_lens[key] arr.posns = sliced_posns arr.term_dict = self.term_dict arr.avg_doc_length = self.avg_doc_length return arr def __setitem__(self, 
key, value): """Set an item in the array.""" key = pd.api.indexers.check_array_indexer(self, key) if isinstance(value, pd.Series): value = value.values if isinstance(value, pd.DataFrame): value = value.values.flatten() if isinstance(value, SearchArray): value = value.to_numpy() if isinstance(value, list): value = np.asarray(value, dtype=object) if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value): raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}") # Cant set a single value to an array if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray): raise ValueError("Cannot set a single value to an array") try: is_encoded = False posns = None term_mat = np.asarray([]) doc_lens = np.asarray([]) if isinstance(value, float): term_mat = np.asarray([value]) doc_lens = np.asarray([0]) elif isinstance(value, Terms): term_mat = np.asarray([value.tf_to_dense(self.term_dict)]) doc_lens = np.asarray([value.doc_len]) is_encoded = value.encoded posns = [value.raw_positions(self.term_dict)] elif isinstance(value, np.ndarray): term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value]) doc_lens = np.asarray([x.doc_len for x in value]) is_encoded = value[0].encoded if len(value) > 0 else False posns = [x.raw_positions(self.term_dict) for x in value] np.nan_to_num(term_mat, copy=False, nan=0) self.term_mat[key] = term_mat self.doc_lens[key] = doc_lens if posns is not None: self.posns.insert(key, posns, is_encoded) # Assume we have a positions for each term, doc pair. We can just update it. # Otherwise we would have added new terms except TermMissingError: self._add_new_terms(key, value) def _add_new_terms(self, key, value): msg = """Adding new terms! This might not be good if you tokenized this new text with a different tokenizer. Also. 
This is slow.""" warnings.warn(msg) scan_value = value if isinstance(value, Terms): scan_value = np.asarray([value]) for row in scan_value: for term in row.terms(): self.term_dict.add_term(term[0]) self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict))) # Ensure posns_lookup has at least max self.posns self[key] = value def value_counts( self, dropna: bool = True, ): if dropna: counts = Counter(self[:]) counts.pop(Terms({}), None) else: counts = Counter(self[:]) return pd.Series(counts) def __len__(self): len_rval = len(self.term_mat.rows) return len_rval def __ne__(self, other): if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented return ~(self == other) def __eq__(self, other): """Return a boolean numpy array indicating elementwise equality.""" # When other is a dataframe or series, not implemented if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented # When other is an ExtensionArray if isinstance(other, SearchArray): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) else: # Compatible term dicts, and same term freqs # (not looking at positions, maybe we should?) if self.term_dict.compatible(other.term_dict): return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens) else: return np.zeros(len(self), dtype=bool) # return np.array(self[:]) == np.array(other[:]) # When other is a scalar value elif isinstance(other, Terms): other = SearchArray([other], tokenizer=self.tokenizer) warnings.warn("Comparing a scalar value to a SearchArray. 
This is slow.") return np.array(self[:]) == np.array(other[:]) # When other is a sequence but not an ExtensionArray # its an array of dicts elif is_list_like(other): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) # We actually don't know how it was tokenized other = SearchArray(other, tokenizer=self.tokenizer) return np.array(self[:]) == np.array(other[:]) # Return False where 'other' is neither the same length nor a scalar else: return np.full(len(self), False) def isna(self): # Every row with all 0s empties = self.doc_lens == 0 return empties def take(self, indices, allow_fill=False, fill_value=None): # Want to take rows of term freqs row_indices = np.arange(len(self.term_mat.rows)) # Take within the row indices themselves result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1) if allow_fill and -1 in result_indices: if fill_value is None or pd.isna(fill_value): fill_value = Terms({}, encoded=True) to_fill_mask = result_indices == -1 # This is slow as it rebuilds all the term dictionaries # on the subsequent assignment lines # However, this case tends to be the exception for # most dataframe operations taken = SearchArray([fill_value] * len(result_indices)) taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy() return taken else: taken = self[result_indices].copy() return taken def copy(self): postings_arr = SearchArray([], tokenizer=self.tokenizer) postings_arr.doc_lens = self.doc_lens.copy() postings_arr.term_mat = self.term_mat.copy() postings_arr.posns = self.posns postings_arr.term_dict = self.term_dict postings_arr.avg_doc_length = self.avg_doc_length if not self.avoid_copies: postings_arr.posns = self.posns.copy() postings_arr.term_dict = self.term_dict.copy() return postings_arr @classmethod def _concat_same_type(cls, to_concat): concatenated_data = np.concatenate([ea[:] for ea in to_concat]) return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer) 
@classmethod def _from_factorized(cls, values, original): return cls(values) def _values_for_factorize(self): """Return an array and missing value suitable for factorization (ie grouping).""" arr = np.asarray(self[:], dtype=object) return arr, Terms({}) def _check_token_arg(self, token): if isinstance(token, str): return token elif isinstance(token, list) and len(token) == 1: return token[0] elif isinstance(token, list): return token else: raise TypeError("Expected a string or list of strings for phrases") # *********************************************************** # Search functionality # *********************************************************** def termfreqs(self, token: Union[List[str], str]) -> np.ndarray: token = self._check_token_arg(token) if isinstance(token, list): return self.phrase_freq(token) try: term_id = self.term_dict.get_term_id(token) matches = np.zeros(len(self), dtype=int) slice_of_rows = None if self.term_mat.subset: slice_of_rows = self.term_mat.rows doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) mask = np.isin(self.term_mat.rows, doc_ids) matches[mask] = termfreqs return matches else: doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) matches[doc_ids] = termfreqs return matches except TermMissingError: return np.zeros(len(self), dtype=int) def docfreq(self, token: str) -> int: if not isinstance(token, str): raise TypeError("Expected a string") # Count number of rows where the term appears try: return self.posns.docfreq(self.term_dict.get_term_id(token)) except TermMissingError: return 0 def doclengths(self) -> np.ndarray: return self.doc_lens def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray: """Return a boolean numpy array indicating which elements contain the given term.""" token = self._check_token_arg(token) if isinstance(token, list): term_freq = self.phrase_freq(token) else: term_freq = self.termfreqs(token) return term_freq > 0 def score(self, token: 
Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray: """Score each doc using a similarity function. Parameters ---------- token : str or list of str of what to search (already tokenized) similarity : How to score the documents. Default is BM25. """ # Get term freqs per token token = self._check_token_arg(token) # For expensive toknes, we compute doc freq first, so we # cache them in the DF cache, to let TF cache know it should be cached tokens_l = [token] if isinstance(token, str) else token all_dfs = np.asarray([self.docfreq(token) for token in tokens_l]) tfs = self.termfreqs(token) token = self._check_token_arg(token) doc_lens = self.doclengths() scores = similarity(term_freqs=tfs, doc_freqs=all_dfs, doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length, num_docs=len(self)) return scores def positions(self, token: str, key=None) -> List[np.ndarray]: """Return a list of lists of positions of the given term.""" term_id = self.term_dict.get_term_id(token) key = self.term_mat.rows[key] if key is not None else self.term_mat.rows posns = self.posns.positions(term_id, doc_ids=key) return posns def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.ones(len(self), dtype=bool) for curr_mask in masks: mask = mask & curr_mask return mask def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.sum(masks, axis=0) >= min_should_match return mask def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray: if slop == 1 and len(tokens) == len(set(tokens)): phrase_freqs = np.zeros(len(self)) try: doc_ids = self.term_mat.rows term_ids = [self.term_dict.get_term_id(token) for token in tokens] return 
self.posns.phrase_freqs(term_ids, doc_ids=doc_ids, phrase_freqs=phrase_freqs) except TermMissingError: return phrase_freqs else: return self.phrase_freq_every_diff(tokens, slop=slop) def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray: if mask is None: mask = self.and_query(tokens) if np.sum(mask) == 0: return mask # Gather positions posns = [self.positions(token, mask) for token in tokens] phrase_freqs = np.zeros(len(self)) phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop) return phrase_freqs def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray: phrase_freqs = -np.ones(len(self)) mask = self.and_query(tokens) phrase_freqs[~mask] = 0 if np.sum(mask) == 0: return phrase_freqs term_posns = [self.positions(term, mask) for term in tokens] for width in [10, 20, 30, 40]: phrase_freqs[mask] = compute_phrase_freqs(term_posns, phrase_freqs[mask], slop=slop, width=width) remaining_mask = phrase_freqs == -1 if np.any(remaining_mask): remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop) phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask] return phrase_freqs
evocodebench_data_270
"""Tokenized, searchable text as a pandas dtype.""" import pandas as pd import numbers from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype from pandas.api.types import is_list_like from pandas.api.extensions import take import json from collections import Counter import warnings import logging from typing import List, Union, Optional, Iterable import numpy as np from searcharray.phrase.scan_merge import scan_merge_ins from searcharray.phrase.posn_diffs import compute_phrase_freqs from searcharray.phrase.middle_out import PosnBitArray from searcharray.similarity import Similarity, default_bm25 from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list from searcharray.term_dict import TermMissingError logger = logging.getLogger(__name__) # When running in pytest import sys # noqa handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.ERROR) formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.ERROR) class Terms: """An indexed search doc - a single bag of tokenized words and positions.""" def __init__(self, postings, doc_len: int = 0, posns: Optional[dict] = None, encoded=False): self.postings = postings self.posns = None self.encoded = encoded self.doc_len = doc_len self.posns = posns def _validate_posns(self): # (For testing/assertions) - Confirm every term in positions also in postings if self.posns is None: return for term in self.posns: if term not in self.postings: raise ValueError(f"Term {term} in positions but not in postings. 
") def termfreq(self, token): return self.postings[token] def terms(self): return self.postings.items() def positions(self, term=None): if self.posns is None: return {} if term is None: posns = self.posns.items() else: posns = self.posns[term] return posns def raw_positions(self, term_dict, term=None): if self.posns is None: return {} if term is None: posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()] else: posns = [(term_dict.get_term_id(term), self.posns[term])] return posns def tf_to_dense(self, term_dict): """Convert to a dense vector of term frequencies.""" dense = np.zeros(len(term_dict)) for term, freq in self.terms(): dense[term_dict.get_term_id(term)] = freq return dense def __len__(self): return len(self.postings) def __repr__(self): posting_keys = set(self.postings.keys()) rval = f"Terms({posting_keys})" return rval def __str__(self): return repr(self) def __eq__(self, other): # Flip to the other implementation if we're comparing to a SearchArray # to get a boolean array back if isinstance(other, SearchArray): return other == self same_postings = isinstance(other, Terms) and self.postings == other.postings if same_postings and self.doc_len == other.doc_len: return True def __lt__(self, other): # return isinstance(other, Terms) and hash(self) < hash(other) keys_both = set(self.postings.keys()).union(set(other.postings.keys())) # Sort lexically keys_both = sorted(keys_both) # Iterate as if these are two vectors of the same large dimensional vector sparse for key in keys_both: lhs_val = 0 rhs_val = 0 try: lhs_val = self.postings[key] except KeyError: pass try: rhs_val = other.postings[key] except KeyError: pass if lhs_val < rhs_val: return True elif lhs_val > rhs_val: return False else: continue return False def __le__(self, other): return self < other or self == other def __gt__(self, other): return not (self < other) and self != other def __hash__(self): return hash(json.dumps(self.postings, sort_keys=True)) class 
TermsDtype(ExtensionDtype): """Pandas dtype for terms.""" name = 'tokenized_text' type = Terms kind = 'O' @classmethod def construct_from_string(cls, string): if not isinstance(string, str): raise TypeError( "'construct_from_string' expects a string, got {}".format(type(string)) ) elif string == cls.name: return cls() else: raise TypeError( "Cannot construct a '{}' from '{}'".format(cls.__name__, string) ) @classmethod def construct_array_type(cls): return SearchArray def __repr__(self): return 'TermsDtype()' @property def na_value(self): return Terms({}) def valid_value(self, value): return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms) register_extension_dtype(TermsDtype) def ws_tokenizer(string): if pd.isna(string): return [] if not isinstance(string, str): raise ValueError("Expected a string") return string.split() def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray): tfs = {} labeled_posns = {} for term_idx in row.cols: term = term_dict.get_term(term_idx) tfs[term] = 1 enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id) labeled_posns[term] = enc_term_posns result = Terms(tfs, posns=labeled_posns, doc_len=doc_len, encoded=True) return result class SearchArray(ExtensionArray): """An array of tokenized text (Termss).""" dtype = TermsDtype() def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True): # Check dtype, raise TypeError if not is_list_like(postings): raise TypeError("Expected list-like object, got {}".format(type(postings))) self.avoid_copies = avoid_copies self.tokenizer = tokenizer self.term_mat, self.posns, \ self.term_dict, self.avg_doc_length, \ self.doc_lens = build_index_from_terms_list(postings, Terms) @classmethod def index(cls, array: Iterable, tokenizer=ws_tokenizer, truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray': """Index an array of strings using tokenizer.""" if not is_list_like(array): raise TypeError("Expected list-like object, got 
{}".format(type(array))) term_mat, posns, term_dict, avg_doc_length, doc_lens =\ build_index_from_tokenizer(array, tokenizer, batch_size=batch_size, truncate=truncate) postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies) postings.term_mat = term_mat postings.posns = posns postings.term_dict = term_dict postings.avg_doc_length = avg_doc_length postings.doc_lens = doc_lens return postings @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into).""" if dtype is not None: if not isinstance(dtype, TermsDtype): return scalars if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype(): return cls(scalars) # String types elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US': return cls(scalars) # Other objects elif isinstance(scalars, np.ndarray) and scalars.dtype != object: return scalars return cls(scalars) def memory_usage(self, deep=False): """Return memory usage of this array in bytes.""" return self.nbytes @property def nbytes(self): return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes def __getitem__(self, key): key = pd.api.indexers.check_array_indexer(self, key) # Want to take rows of term freqs if isinstance(key, numbers.Integral): try: rows = self.term_mat[key] doc_len = self.doc_lens[key] doc_id = key if doc_id < 0: doc_id += len(self) return _row_to_postings_row(doc_id, rows[0], doc_len, self.term_dict, self.posns) except IndexError: raise IndexError("index out of bounds") else: # Construct a sliced view of this array sliced_tfs = self.term_mat.slice(key) sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns arr = SearchArray([], tokenizer=self.tokenizer) arr.term_mat = sliced_tfs arr.doc_lens = self.doc_lens[key] arr.posns = sliced_posns arr.term_dict = self.term_dict arr.avg_doc_length = self.avg_doc_length return arr def __setitem__(self, 
key, value): """Set an item in the array.""" key = pd.api.indexers.check_array_indexer(self, key) if isinstance(value, pd.Series): value = value.values if isinstance(value, pd.DataFrame): value = value.values.flatten() if isinstance(value, SearchArray): value = value.to_numpy() if isinstance(value, list): value = np.asarray(value, dtype=object) if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value): raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}") # Cant set a single value to an array if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray): raise ValueError("Cannot set a single value to an array") try: is_encoded = False posns = None term_mat = np.asarray([]) doc_lens = np.asarray([]) if isinstance(value, float): term_mat = np.asarray([value]) doc_lens = np.asarray([0]) elif isinstance(value, Terms): term_mat = np.asarray([value.tf_to_dense(self.term_dict)]) doc_lens = np.asarray([value.doc_len]) is_encoded = value.encoded posns = [value.raw_positions(self.term_dict)] elif isinstance(value, np.ndarray): term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value]) doc_lens = np.asarray([x.doc_len for x in value]) is_encoded = value[0].encoded if len(value) > 0 else False posns = [x.raw_positions(self.term_dict) for x in value] np.nan_to_num(term_mat, copy=False, nan=0) self.term_mat[key] = term_mat self.doc_lens[key] = doc_lens if posns is not None: self.posns.insert(key, posns, is_encoded) # Assume we have a positions for each term, doc pair. We can just update it. # Otherwise we would have added new terms except TermMissingError: self._add_new_terms(key, value) def _add_new_terms(self, key, value): msg = """Adding new terms! This might not be good if you tokenized this new text with a different tokenizer. Also. 
This is slow.""" warnings.warn(msg) scan_value = value if isinstance(value, Terms): scan_value = np.asarray([value]) for row in scan_value: for term in row.terms(): self.term_dict.add_term(term[0]) self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict))) # Ensure posns_lookup has at least max self.posns self[key] = value def value_counts( self, dropna: bool = True, ): if dropna: counts = Counter(self[:]) counts.pop(Terms({}), None) else: counts = Counter(self[:]) return pd.Series(counts) def __len__(self): len_rval = len(self.term_mat.rows) return len_rval def __ne__(self, other): if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented return ~(self == other) def __eq__(self, other): """Return a boolean numpy array indicating elementwise equality.""" # When other is a dataframe or series, not implemented if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index): return NotImplemented # When other is an ExtensionArray if isinstance(other, SearchArray): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) else: # Compatible term dicts, and same term freqs # (not looking at positions, maybe we should?) if self.term_dict.compatible(other.term_dict): return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens) else: return np.zeros(len(self), dtype=bool) # return np.array(self[:]) == np.array(other[:]) # When other is a scalar value elif isinstance(other, Terms): other = SearchArray([other], tokenizer=self.tokenizer) warnings.warn("Comparing a scalar value to a SearchArray. 
This is slow.") return np.array(self[:]) == np.array(other[:]) # When other is a sequence but not an ExtensionArray # its an array of dicts elif is_list_like(other): if len(self) != len(other): return False elif len(other) == 0: return np.array([], dtype=bool) # We actually don't know how it was tokenized other = SearchArray(other, tokenizer=self.tokenizer) return np.array(self[:]) == np.array(other[:]) # Return False where 'other' is neither the same length nor a scalar else: return np.full(len(self), False) def isna(self): # Every row with all 0s empties = self.doc_lens == 0 return empties def take(self, indices, allow_fill=False, fill_value=None): # Want to take rows of term freqs row_indices = np.arange(len(self.term_mat.rows)) # Take within the row indices themselves result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1) if allow_fill and -1 in result_indices: if fill_value is None or pd.isna(fill_value): fill_value = Terms({}, encoded=True) to_fill_mask = result_indices == -1 # This is slow as it rebuilds all the term dictionaries # on the subsequent assignment lines # However, this case tends to be the exception for # most dataframe operations taken = SearchArray([fill_value] * len(result_indices)) taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy() return taken else: taken = self[result_indices].copy() return taken def copy(self): postings_arr = SearchArray([], tokenizer=self.tokenizer) postings_arr.doc_lens = self.doc_lens.copy() postings_arr.term_mat = self.term_mat.copy() postings_arr.posns = self.posns postings_arr.term_dict = self.term_dict postings_arr.avg_doc_length = self.avg_doc_length if not self.avoid_copies: postings_arr.posns = self.posns.copy() postings_arr.term_dict = self.term_dict.copy() return postings_arr @classmethod def _concat_same_type(cls, to_concat): concatenated_data = np.concatenate([ea[:] for ea in to_concat]) return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer) 
@classmethod def _from_factorized(cls, values, original): return cls(values) def _values_for_factorize(self): """Return an array and missing value suitable for factorization (ie grouping).""" arr = np.asarray(self[:], dtype=object) return arr, Terms({}) def _check_token_arg(self, token): if isinstance(token, str): return token elif isinstance(token, list) and len(token) == 1: return token[0] elif isinstance(token, list): return token else: raise TypeError("Expected a string or list of strings for phrases") # *********************************************************** # Search functionality # *********************************************************** def termfreqs(self, token: Union[List[str], str]) -> np.ndarray: token = self._check_token_arg(token) if isinstance(token, list): return self.phrase_freq(token) try: term_id = self.term_dict.get_term_id(token) matches = np.zeros(len(self), dtype=int) slice_of_rows = None if self.term_mat.subset: slice_of_rows = self.term_mat.rows doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) mask = np.isin(self.term_mat.rows, doc_ids) matches[mask] = termfreqs return matches else: doc_ids, termfreqs = self.posns.termfreqs(term_id, doc_ids=slice_of_rows) matches[doc_ids] = termfreqs return matches except TermMissingError: return np.zeros(len(self), dtype=int) def docfreq(self, token: str) -> int: if not isinstance(token, str): raise TypeError("Expected a string") # Count number of rows where the term appears try: return self.posns.docfreq(self.term_dict.get_term_id(token)) except TermMissingError: return 0 def doclengths(self) -> np.ndarray: return self.doc_lens def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray: """Return a boolean numpy array indicating which elements contain the given term.""" token = self._check_token_arg(token) if isinstance(token, list): term_freq = self.phrase_freq(token) else: term_freq = self.termfreqs(token) return term_freq > 0 def score(self, token: 
Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray: """Score each doc using a similarity function. Parameters ---------- token : str or list of str of what to search (already tokenized) similarity : How to score the documents. Default is BM25. """ # Get term freqs per token token = self._check_token_arg(token) # For expensive toknes, we compute doc freq first, so we # cache them in the DF cache, to let TF cache know it should be cached tokens_l = [token] if isinstance(token, str) else token all_dfs = np.asarray([self.docfreq(token) for token in tokens_l]) tfs = self.termfreqs(token) token = self._check_token_arg(token) doc_lens = self.doclengths() scores = similarity(term_freqs=tfs, doc_freqs=all_dfs, doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length, num_docs=len(self)) return scores def positions(self, token: str, key=None) -> List[np.ndarray]: """Return a list of lists of positions of the given term.""" term_id = self.term_dict.get_term_id(token) key = self.term_mat.rows[key] if key is not None else self.term_mat.rows posns = self.posns.positions(term_id, doc_ids=key) return posns def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.ones(len(self), dtype=bool) for curr_mask in masks: mask = mask & curr_mask return mask def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray: """Return a mask on the postings array indicating which elements contain all terms.""" masks = [self.match(term) for term in tokens] mask = np.sum(masks, axis=0) >= min_should_match return mask def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray: if slop == 1 and len(tokens) == len(set(tokens)): phrase_freqs = np.zeros(len(self)) try: doc_ids = self.term_mat.rows term_ids = [self.term_dict.get_term_id(token) for token in tokens] return 
self.posns.phrase_freqs(term_ids, doc_ids=doc_ids, phrase_freqs=phrase_freqs) except TermMissingError: return phrase_freqs else: return self.phrase_freq_every_diff(tokens, slop=slop) def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray: if mask is None: mask = self.and_query(tokens) if np.sum(mask) == 0: return mask # Gather positions posns = [self.positions(token, mask) for token in tokens] phrase_freqs = np.zeros(len(self)) phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop) return phrase_freqs def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray: phrase_freqs = -np.ones(len(self)) mask = self.and_query(tokens) phrase_freqs[~mask] = 0 if np.sum(mask) == 0: return phrase_freqs term_posns = [self.positions(term, mask) for term in tokens] for width in [10, 20, 30, 40]: phrase_freqs[mask] = compute_phrase_freqs(term_posns, phrase_freqs[mask], slop=slop, width=width) remaining_mask = phrase_freqs == -1 if np.any(remaining_mask): remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop) phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask] return phrase_freqs
evocodebench_data_271
import threading

from deluder.common import *
from deluder.interceptor import MessageInterceptor
from deluder.interceptors.proxifier.strategy import *
from deluder.interceptors.proxifier.connection import DEFAULT_CONNECTION_ID, Connection
from deluder.interceptors.proxifier.server import Server


class ProxifierMessageInterceptor(MessageInterceptor):
    """
    Proxifier interceptor allows sending Deluder messages through TCP proxies
    using multiple strategies
    """
    server: Server
    connections: Dict[str, Connection]
    lock: threading.Lock

    @classmethod
    def default_config(cls) -> dict:
        """Return the default configuration: proxy/server endpoints,
        framing strategy (with per-strategy options) and connection policy."""
        return {
            'proxyHost': '127.0.0.1',
            'proxyPort': 8888,
            'serverHost': '127.0.0.1',
            'serverPort': 25500,
            'strategy': 'length',
            'strategies': {
                'buffer': {
                    'bufferSize': 65536,
                },
                'suffix': {
                    'bufferSize': 65536,
                    'value': '[D_END]',
                },
                'length': {
                },
            },
            'autoCloseConnections': True,
            'multipleConnections': True,
        }

    def init(self):
        """Start the local server and prepare connection bookkeeping."""
        self.server = Server(
            server_host=self.config['serverHost'],
            server_port=self.config['serverPort'],
            proxy_host=self.config['proxyHost'],
            proxy_port=self.config['proxyPort'],
            strategy=ProxifierStrategyType(self.config['strategy']),
            strategy_config=self.config['strategies'].get(self.config['strategy'], {}),
            logger=self.logger
        )
        self.server.start()
        self.connections = {}
        self.lock = threading.Lock()

    def intercept(self, process: Process, message: Message):
        """Route send/recv payloads through the proxy connection and react
        to close events."""
        if isinstance(message, SendMessage):
            message.data = self._get_connection(message).c2s(message.data)
        elif isinstance(message, RecvMessage):
            message.data = self._get_connection(message).s2c(message.data)
        elif isinstance(message, CloseMessage):
            self._handle_connection_close_message(message)

    def destroy(self):
        """Stop all open connections and the server.

        hasattr guards tolerate destroy() being called before init()
        completed.
        """
        if hasattr(self, 'connections'):
            for connection in self.connections.values():
                connection.stop()
        if hasattr(self, 'server'):
            self.server.stop()

    def _extract_connection_id(self, message: Message) -> str:
        # IDIOM FIX: truthiness check instead of "== True"; behavior is
        # identical for the boolean values used in default_config.
        if self.config['multipleConnections']:
            return message.metadata.get(MetadataType.CONNECTION_ID, DEFAULT_CONNECTION_ID)
        return DEFAULT_CONNECTION_ID

    def _get_connection(self, message: Message) -> Connection:
        with self.lock:
            # Determine connection identifier
            connection_id = self._extract_connection_id(message)

            # Get connection or create a new one if it does not exist
            connection = self.connections.get(connection_id)
            if connection is None:
                connection = self.server.connect()
                connection.set_info(connection_id)
                connection.start()
                self.connections[connection_id] = connection
            return connection

    def _handle_connection_close_message(self, message: Message):
        # IDIOM FIX: "is False" replaced with a truthiness check consistent
        # with _extract_connection_id.
        if not self.config['autoCloseConnections']:
            return  # Automatic closing of connection is disabled

        with self.lock:
            # Determine connection identifier
            connection_id = self._extract_connection_id(message)
            if connection_id is None or connection_id == DEFAULT_CONNECTION_ID:
                return  # Do not close default connection

            connection = self.connections.pop(connection_id, None)
            if connection is None:
                return  # Connection already closed or never opened

            self.logger.info('Connection %s is being closed due to received close event.', connection.id)
            connection.stop()
evocodebench_data_272
"""Naive popcount implementation until such time that's exposed in numpy (SOON!).""" import numpy as np m1 = np.uint64(0x5555555555555555) m2 = np.uint64(0x3333333333333333) m3 = np.uint64(0x0F0F0F0F0F0F0F0F) m4 = np.uint64(0x0101010101010101) mask = np.uint64(-1) s55 = np.uint64(m1 & mask) # Add more digits for 128bit support s33 = np.uint64(m2 & mask) s0F = np.uint64(m3 & mask) s01 = np.uint64(m4 & mask) num_bytes_64 = 8 all_but_one_bit = np.uint64(8 * (num_bytes_64 - 1)) _1 = np.uint64(1) _2 = np.uint64(2) _4 = np.uint64(4) def bit_count64(arr): """Count the number of bits set in each element in the array.""" arr = arr - ((arr >> _1) & s55) arr = (arr & s33) + ((arr >> _2) & s33) arr += (arr >> _4) arr &= s0F arr *= s01 arr >>= all_but_one_bit return arr
evocodebench_data_273
"""Utility functions for Solr users of searcharray.""" import re import pandas as pd import numpy as np from typing import List, Optional, Dict, Tuple from searcharray.postings import SearchArray from searcharray.similarity import Similarity, default_bm25 def parse_min_should_match(num_clauses: int, spec: str) -> int: """Parse Solr's min should match (ie mm) spec. See this ChatGPT translation of mm code from Solr's Java code for parsing this https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb Parameters ---------- num_clauses : int spec : str Returns ------- int : the number of clauses that must match """ def checked_parse_int(value, error_message): try: return int(value) except ValueError: raise ValueError(error_message) result = num_clauses spec = spec.strip() if '<' in spec: # we have conditional spec(s) space_around_less_than_pattern = re.compile(r'\s*<\s*') spec = space_around_less_than_pattern.sub('<', spec) for s in spec.split(): parts = s.split('<', 1) if len(parts) < 2: raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'") upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.") if num_clauses <= upper_bound: return result else: result = parse_min_should_match(num_clauses, parts[1]) return result # otherwise, simple expression if '%' in spec: # percentage - assume the % was the last char. If not, let int() fail. spec = spec[:-1] percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.") calc = (result * percent) * (1 / 100) result = result + int(calc) if calc < 0 else int(calc) else: calc = checked_parse_int(spec, "Invalid 'mm' spec. 
Expecting an integer.") result = result + calc if calc < 0 else calc return min(num_clauses, max(result, 0)) def parse_field_boosts(field_lists: List[str]) -> dict: """Parse Solr's qf, pf, pf2, pf3 field boosts.""" if not field_lists: return {} out = {} carat_pattern = re.compile(r'\^') for field in field_lists: parts = carat_pattern.split(field) out[parts[0]] = None if len(parts) == 1 else float(parts[1]) return out def get_field(frame, field) -> SearchArray: if field not in frame.columns: raise ValueError(f"Field {field} not in dataframe") if not isinstance(frame[field].array, SearchArray): raise ValueError(f"Field {field} is not a searcharray field") return frame[field].array def parse_query_terms(frame: pd.DataFrame, query: str, query_fields: List[str]): search_terms: Dict[str, List[str]] = {} num_search_terms = 0 term_centric = True for field in query_fields: arr = get_field(frame, field) tokenizer = arr.tokenizer search_terms[field] = [] field_num_search_terms = 0 for posn, term in enumerate(tokenizer(query)): search_terms[field].append(term) field_num_search_terms += 1 if num_search_terms == 0: num_search_terms = field_num_search_terms elif field_num_search_terms != num_search_terms: term_centric = False return num_search_terms, search_terms, term_centric def _edismax_term_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity) -> Tuple[np.ndarray, str]: explain = [] term_scores = [] for term_posn in range(num_search_terms): max_scores = np.zeros(len(frame)) term_explain = [] for field, boost in query_fields.items(): term = search_terms[field][term_posn] post_arr = get_field(frame, field) field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" term_explain.append(f"{field}:{term}^{boost_exp}") max_scores = np.maximum(max_scores, field_term_score) 
term_scores.append(max_scores) explain.append("(" + " | ".join(term_explain) + ")") min_should_match = parse_min_should_match(num_search_terms, spec=mm) qf_scores = np.asarray(term_scores) matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match qf_scores = np.sum(term_scores, axis=0) qf_scores[~matches_gt_mm] = 0 return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}" def _edismax_field_centric(frame: pd.DataFrame, query_fields: Dict[str, float], num_search_terms: int, search_terms: Dict[str, List[str]], mm: str, similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: field_scores = [] explain = [] for field, boost in query_fields.items(): post_arr = get_field(frame, field) term_scores = np.array([post_arr.score(term, similarity=similarity) for term in search_terms[field]]) min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm) exp = " ".join([f"{field}:{term}" for term in search_terms[field]]) boost_exp = f"{boost}" if boost is not None else "1" exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}" exp = "(" + exp + f")^{boost_exp}" matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field])) sum_terms_bm25 = np.sum(term_scores, axis=0) sum_terms_bm25[~matches_gt_mm] = 0 field_scores.append(sum_terms_bm25 * (1 if boost is None else boost)) explain.append(exp) # Take maximum field scores as qf qf_scores = np.asarray(field_scores) qf_scores = np.max(qf_scores, axis=0) return qf_scores, " | ".join(explain) def edismax(frame: pd.DataFrame, q: str, qf: List[str], mm: Optional[str] = None, pf: Optional[List[str]] = None, pf2: Optional[List[str]] = None, pf3: Optional[List[str]] = None, q_op: str = "OR", similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]: """Run edismax search over dataframe with searcharray fields. 
Parameters ---------- q : str The query string mm : str The minimum should match spec qf : list The fields to search pf : list The fields to search for phrase matches pf2 : list The fields to search for bigram matches pf3 : list The fields to search for trigram matches q_op : str, optional The default operator, by default "OR" Returns ------- np.ndarray The search results """ def listify(x): return x if isinstance(x, list) else [x] query_fields = parse_field_boosts(listify(qf)) phrase_fields = parse_field_boosts(listify(pf)) if pf else {} if mm is None: mm = "1" if q_op == "AND": mm = "100%" # bigram_fields = parse_field_boosts(pf2) if pf2 else {} # trigram_fields = parse_field_boosts(pf3) if pf3 else {} num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys())) if term_centric: qf_scores, explain = _edismax_term_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) else: qf_scores, explain = _edismax_field_centric(frame, query_fields, num_search_terms, search_terms, mm, similarity=similarity) phrase_scores = [] for field, boost in phrase_fields.items(): arr = get_field(frame, field) terms = search_terms[field] field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost) boost_exp = f"{boost}" if boost is not None else "1" explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}" phrase_scores.append(field_phrase_score) if len(phrase_scores) > 0: phrase_scores = np.sum(phrase_scores, axis=0) # Add where term_scores > 0 term_match_idx = np.where(qf_scores)[0] qf_scores[term_match_idx] += phrase_scores[term_match_idx] return qf_scores, explain
evocodebench_data_274
import threading

from deluder.common import *
from deluder.interceptor import MessageInterceptor
from deluder.interceptors.proxifier.strategy import *
from deluder.interceptors.proxifier.connection import DEFAULT_CONNECTION_ID, Connection
from deluder.interceptors.proxifier.server import Server


class ProxifierMessageInterceptor(MessageInterceptor):
    """
    Proxifier interceptor allows sending Deluder messages through TCP proxies using multiple strategies
    """
    server: Server
    connections: Dict[str, Connection]
    lock: threading.Lock

    @classmethod
    def default_config(cls) -> dict:
        """Default configuration: local proxy/server endpoints, framing
        strategy selection, and connection-lifecycle switches."""
        return {
            'proxyHost': '127.0.0.1',
            'proxyPort': 8888,
            'serverHost': '127.0.0.1',
            'serverPort': 25500,
            'strategy': 'length',
            'strategies': {
                'buffer': {
                    'bufferSize': 65536,
                },
                'suffix': {
                    'bufferSize': 65536,
                    'value': '[D_END]',
                },
                'length': {
                },
            },
            'autoCloseConnections': True,
            'multipleConnections': True,
        }

    def init(self):
        """Start the proxifier server and reset connection bookkeeping."""
        cfg = self.config
        strategy_name = cfg['strategy']
        self.server = Server(
            server_host=cfg['serverHost'],
            server_port=cfg['serverPort'],
            proxy_host=cfg['proxyHost'],
            proxy_port=cfg['proxyPort'],
            strategy=ProxifierStrategyType(strategy_name),
            strategy_config=cfg['strategies'].get(strategy_name, {}),
            logger=self.logger,
        )
        self.server.start()
        self.connections = {}
        self.lock = threading.Lock()

    def intercept(self, process: Process, message: Message):
        """Route message payloads through the per-connection proxy pipeline.

        Send/recv payloads are rewritten by the proxy (c2s/s2c); close
        events may tear down the associated connection.
        """
        if isinstance(message, SendMessage):
            message.data = self._get_connection(message).c2s(message.data)
            return
        if isinstance(message, RecvMessage):
            message.data = self._get_connection(message).s2c(message.data)
            return
        if isinstance(message, CloseMessage):
            self._handle_connection_close_message(message)

    def destroy(self):
        """Stop all proxy connections and the server, if they were created."""
        if hasattr(self, 'connections'):
            for conn in self.connections.values():
                conn.stop()
        if hasattr(self, 'server'):
            self.server.stop()

    def _extract_connection_id(self, message: Message) -> str:
        """Pick the connection key for a message: the metadata connection id
        when multiple connections are enabled, else the shared default id."""
        if not (self.config['multipleConnections'] == True):
            return DEFAULT_CONNECTION_ID
        return message.metadata.get(MetadataType.CONNECTION_ID, DEFAULT_CONNECTION_ID)

    def _get_connection(self, message: Message) -> Connection:
        """Return the proxy connection for this message, creating and
        starting one on first use (thread-safe)."""
        with self.lock:
            conn_id = self._extract_connection_id(message)
            # EAFP: the common case is an already-established connection.
            try:
                return self.connections[conn_id]
            except KeyError:
                pass
            conn = self.server.connect()
            conn.set_info(conn_id)
            conn.start()
            self.connections[conn_id] = conn
            return conn

    def _handle_connection_close_message(self, message: Message):
        """Tear down the connection referenced by a close event, unless
        auto-close is disabled or it is the shared default connection."""
        if self.config['autoCloseConnections'] is False:
            return  # Automatic closing of connection is disabled

        with self.lock:
            conn_id = self._extract_connection_id(message)
            if conn_id is None or conn_id == DEFAULT_CONNECTION_ID:
                return  # Do not close default connection
            conn = self.connections.pop(conn_id, None)
            if conn is None:
                return  # Connection already removed
            self.logger.info('Connection %s is being closed due to received close event.', conn.id)
            conn.stop()
evocodebench_data_275
import threading

from deluder.common import *
from deluder.interceptor import MessageInterceptor
from deluder.interceptors.proxifier.strategy import *
from deluder.interceptors.proxifier.connection import DEFAULT_CONNECTION_ID, Connection
from deluder.interceptors.proxifier.server import Server


class ProxifierMessageInterceptor(MessageInterceptor):
    """
    Proxifier interceptor allows sending Deluder messages through TCP proxies using multiple strategies
    """
    # server: the proxifier Server instance created in init()
    server: Server
    # connections: connection-id -> active proxy Connection
    connections: Dict[str, Connection]
    # lock: guards self.connections across intercept/close handling
    lock: threading.Lock

    @classmethod
    def default_config(cls) -> dict:
        """Return the default interceptor configuration (local proxy/server
        endpoints, framing strategy and per-strategy options, lifecycle flags)."""
        return {
            'proxyHost': '127.0.0.1',
            'proxyPort': 8888,
            'serverHost': '127.0.0.1',
            'serverPort': 25500,
            'strategy': 'length',
            'strategies': {
                'buffer': {
                    'bufferSize': 65536,
                },
                'suffix': {
                    'bufferSize': 65536,
                    'value': '[D_END]',
                },
                'length': {
                },
            },
            'autoCloseConnections': True,
            'multipleConnections': True,
        }

    def init(self):
        """Create and start the proxifier Server from config, then reset
        the connection map and its lock."""
        self.server = Server(
            server_host=self.config['serverHost'],
            server_port=self.config['serverPort'],
            proxy_host=self.config['proxyHost'],
            proxy_port=self.config['proxyPort'],
            strategy=ProxifierStrategyType(self.config['strategy']),
            # Per-strategy options; empty dict if none configured for it.
            strategy_config=self.config['strategies'].get(self.config['strategy'], {}),
            logger=self.logger
        )
        self.server.start()
        self.connections = {}
        self.lock = threading.Lock()

    def intercept(self, process: Process, message: Message):
        """Rewrite send/recv payloads through the per-connection proxy
        (c2s/s2c); handle close events by tearing the connection down."""
        if isinstance(message, SendMessage):
            message.data = self._get_connection(message).c2s(message.data)
        elif isinstance(message, RecvMessage):
            message.data = self._get_connection(message).s2c(message.data)
        elif isinstance(message, CloseMessage):
            self._handle_connection_close_message(message)

    def destroy(self):
        """Stop every open connection and the server. hasattr guards allow
        destroy() to run even if init() never completed."""
        if hasattr(self, 'connections'):
            for connection in self.connections.values():
                connection.stop()
        if hasattr(self, 'server'):
            self.server.stop()

    def _extract_connection_id(self, message: Message) -> str:
        """Return the connection key for a message: the metadata connection
        id when multipleConnections is enabled, else the shared default."""
        if self.config['multipleConnections'] == True:
            return message.metadata.get(MetadataType.CONNECTION_ID, DEFAULT_CONNECTION_ID)
        return DEFAULT_CONNECTION_ID

    def _get_connection(self, message: Message) -> Connection:
        """Return (creating and starting on first use) the proxy connection
        for this message. Thread-safe via self.lock."""
        with self.lock:
            # Determine connection identifier
            connection_id = self._extract_connection_id(message)

            # Get connection or create a new one if it does not exist
            connection = self.connections.get(connection_id)
            if connection is None:
                connection = self.server.connect()
                connection.set_info(connection_id)
                connection.start()
                self.connections[connection_id] = connection

            return connection

    def _handle_connection_close_message(self, message: Message):
        """Close and forget the connection named by a close event, unless
        auto-close is off or it is the shared default connection."""
        if self.config['autoCloseConnections'] is False:
            return  # Automatic closing of connection is disabled

        with self.lock:
            # Determine connection identifier
            connection_id = self._extract_connection_id(message)
            if connection_id is None or connection_id == DEFAULT_CONNECTION_ID:
                return  # Do not close default connection
            connection = self.connections.pop(connection_id, None)
            if connection is None:
                return  # Connection already removed (or never existed)
            self.logger.info('Connection %s is being closed due to received close event.', connection.id)
            connection.stop()