id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
6,731 | import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    # NOTE(review): `version`, `_output_size` and `_new_empty_tensor` are not imported in
    # this snippet -- in the original file they come from `packaging` / util helpers; verify.
    # NOTE(review): this definition shadows the `interpolate` imported from util.misc above.
    if version.parse(torchvision.__version__) < version.parse('0.7'):
        if input.numel() > 0:
            # Non-empty input: the stock PyTorch implementation handles it fine.
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners
            )
        # Empty batch on old torchvision: compute the output spatial size ourselves
        # and return an empty tensor of the right shape instead of crashing.
        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        # torchvision >= 0.7 already supports empty batches.
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
def resize(image, target, size, max_size=None):
    """Resize a PIL image and rescale the boxes/area/masks in its target dict to match.

    `size` is either the desired length of the shorter side (scalar), optionally
    capped so the longer side stays below `max_size`, or an exact (w, h) pair.
    Returns (rescaled_image, rescaled_target); target may be None.
    """
    # size can be min_size (scalar) or (w, h) tuple
    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Compute an aspect-preserving (h, w) with shorter side == size,
        # shrinking `size` first if the longer side would exceed max_size.
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))
        if (w <= h and w == size) or (h <= w and h == size):
            # Shorter side already has the requested length: no change.
            return (h, w)
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        return (oh, ow)
    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            # Explicit (w, h) requested; F.resize wants (h, w), hence the reversal.
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)
    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size)
    if target is None:
        return rescaled_image, None
    # PIL .size is (w, h), so zipping yields (width_ratio, height_ratio).
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # Boxes are xyxy here: scale x coordinates by the width ratio, y by height.
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes
    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area
    h, w = size
    target["size"] = torch.tensor([h, w])
    if "masks" in target:
        # interpolate needs a channel dim; threshold back to a boolean mask afterwards.
        target['masks'] = interpolate(
            target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
    return rescaled_image, target | null |
6,732 | import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from util.misc import interpolate
def pad(image, target, padding):
    """Pad *image* on its bottom/right edges and update the target dict to match.

    `padding` is (pad_right, pad_bottom) in pixels. Returns (padded_image, target);
    target may be None.
    """
    pad_right, pad_bottom = padding[0], padding[1]
    # torchvision's functional pad takes (left, top, right, bottom).
    padded_image = F.pad(image, (0, 0, pad_right, pad_bottom))
    if target is None:
        return padded_image, None
    updated = target.copy()
    # should we do something wrt the original size?
    # PIL .size is (w, h); targets store (h, w), hence the reversal.
    updated["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in updated:
        # Tensor pad takes (left, right, top, bottom) for the last two dims.
        updated['masks'] = torch.nn.functional.pad(updated['masks'], (0, pad_right, 0, pad_bottom))
    return padded_image, updated
6,733 | import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
from util.box_ops import masks_to_boxes
from .coco import make_coco_transforms
class CocoPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
def __getitem__(self, idx):
def __len__(self):
def get_height_and_width(self, idx):
def make_coco_transforms(image_set):
def build(image_set, args):
    """Build the COCO-panoptic dataset for the given split ('train' or 'val')."""
    img_folder_root = Path(args.coco_path)
    ann_folder_root = Path(args.coco_panoptic_path)
    assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist'
    assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist'
    mode = 'panoptic'
    # split name -> (image sub-folder, annotation json relative to ann_folder_root)
    PATHS = {
        "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'),
        "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'),
    }
    img_folder, ann_file = PATHS[image_set]
    img_folder_path = img_folder_root / img_folder
    # folder holding the per-image panoptic PNGs, e.g. 'panoptic_train2017'
    ann_folder = ann_folder_root / f'{mode}_{img_folder}'
    ann_file = ann_folder_root / ann_file
    dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file,
                           transforms=make_coco_transforms(image_set), return_masks=args.masks)
    return dataset | null |
6,734 | from pathlib import Path
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into a [N, H, W] binary mask tensor.

    Each entry of `segmentations` is the polygon list of one instance; instances
    with no polygons yield an empty (0, H, W) uint8 tensor.
    """
    per_instance = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        if len(decoded.shape) < 3:
            # single polygon: add the per-polygon channel axis
            decoded = decoded[..., None]
        instance = torch.as_tensor(decoded, dtype=torch.uint8)
        # a pixel belongs to the instance if any of its polygons covers it
        per_instance.append(instance.any(dim=2))
    if not per_instance:
        # no annotations: keep an empty but correctly-shaped tensor
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_instance, dim=0)
6,735 | from pathlib import Path
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
import datasets.transforms as T
class CocoDetection(torchvision.datasets.CocoDetection):
    """COCO detection dataset that converts raw annotations into DETR's target format."""
    def __init__(self, img_folder, ann_file, transforms, return_masks):
        super(CocoDetection, self).__init__(img_folder, ann_file)
        self._transforms = transforms
        # ConvertCocoPolysToMask is defined elsewhere in this module -- TODO confirm.
        self.prepare = ConvertCocoPolysToMask(return_masks)
    def __getitem__(self, idx):
        img, target = super(CocoDetection, self).__getitem__(idx)
        image_id = self.ids[idx]
        # wrap the raw annotation list into the dict format `prepare` expects
        target = {'image_id': image_id, 'annotations': target}
        img, target = self.prepare(img, target)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target
def make_coco_transforms(image_set):
    """Return the augmentation pipeline for a COCO split.

    'train' gets random flip + multi-scale resize (or a resize->crop->resize
    branch, picked at random); 'val' gets a fixed resize only. Both end with
    ToTensor + ImageNet-statistics normalization. Raises ValueError otherwise.
    """
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # candidate shorter-side lengths for multi-scale training
    scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
    if image_set == 'train':
        return T.Compose([
            T.RandomHorizontalFlip(),
            T.RandomSelect(
                T.RandomResize(scales, max_size=1333),
                T.Compose([
                    T.RandomResize([400, 500, 600]),
                    T.RandomSizeCrop(384, 600),
                    T.RandomResize(scales, max_size=1333),
                ])
            ),
            normalize,
        ])
    if image_set == 'val':
        return T.Compose([
            T.RandomResize([800], max_size=1333),
            normalize,
        ])
    raise ValueError(f'unknown {image_set}')
def build(image_set, args):
    """Construct the COCO instance-detection dataset for 'train' or 'val'."""
    root = Path(args.coco_path)
    assert root.exists(), f'provided COCO path {root} does not exist'
    mode = 'instances'
    # raises KeyError for an unknown split name, like the original mapping lookup
    split = {"train": "train2017", "val": "val2017"}[image_set]
    img_folder = root / split
    ann_file = root / "annotations" / f'{mode}_{split}.json'
    dataset = CocoDetection(img_folder, ann_file,
                            transforms=make_coco_transforms(image_set),
                            return_masks=args.masks)
    return dataset
6,736 | import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
6,737 | import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | Return an activation function given a string |
6,738 | import torch
import torch.nn.functional as F
from torch import nn
from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer
class DETR(nn.Module):
    """ This is the DETR module that performs object detection """
    def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, ie detection slot. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        # +1 output for the special "no object" class
        self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
        # 3-layer MLP regressing normalized (cx, cy, w, h); MLP is defined elsewhere in the file
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        # learned object queries fed to the transformer decoder
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        # 1x1 conv projecting backbone channels down to the transformer width
        self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
        self.backbone = backbone
        self.aux_loss = aux_loss
    def forward(self, samples: NestedTensor):
        """ The forward expects a NestedTensor, which consists of:
               - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
            It returns a dict with the following elements:
               - "pred_logits": the classification logits (including no-object) for all queries.
                                Shape= [batch_size x num_queries x (num_classes + 1)]
               - "pred_boxes": The normalized boxes coordinates for all queries, represented as
                               (center_x, center_y, height, width). These values are normalized in [0, 1],
                               relative to the size of each individual image (disregarding possible padding).
                               See PostProcess for information on how to retrieve the unnormalized bounding box.
               - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
                                dictionnaries containing the two above keys for each decoder layer.
        """
        if isinstance(samples, (list, torch.Tensor)):
            # accept plain tensors / lists of images for convenience
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)
        # only the last (coarsest) backbone feature map is used
        src, mask = features[-1].decompose()
        assert mask is not None
        hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
        outputs_class = self.class_embed(hs)
        outputs_coord = self.bbox_embed(hs).sigmoid()
        # hs stacks all decoder layers; index -1 is the final layer's prediction
        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
        return out
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class SetCriterion(nn.Module):
    """ This class computes the loss for DETR.
    The process happens in two steps:
        1) we compute hungarian assignment between ground truth boxes and the outputs of the model
        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
    """
    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # per-class CE weights: down-weight the (very frequent) no-object class
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[-1] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)
    def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # default every query to the no-object class, then fill in the matched ones
        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o
        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {'loss_ce': loss_ce}
        if log:
            # TODO this should probably be a separate loss, not hacked in this one here
            losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
        """
        pred_logits = outputs['pred_logits']
        device = pred_logits.device
        tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
        # Count the number of predictions that are NOT "no-object" (which is the last class)
        card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
        card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
        losses = {'cardinality_error': card_err}
        return losses
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
        losses = {}
        # normalize by the (dist-averaged) number of boxes, not the batch size
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes
        loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
            box_ops.box_cxcywh_to_xyxy(src_boxes),
            box_ops.box_cxcywh_to_xyxy(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes
        return losses
    def loss_masks(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the masks: the focal loss and the dice loss.
           targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
        """
        assert "pred_masks" in outputs
        src_idx = self._get_src_permutation_idx(indices)
        tgt_idx = self._get_tgt_permutation_idx(indices)
        src_masks = outputs["pred_masks"]
        src_masks = src_masks[src_idx]
        masks = [t["masks"] for t in targets]
        # TODO use valid to mask invalid areas due to padding in loss
        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(src_masks)
        target_masks = target_masks[tgt_idx]
        # upsample predictions to the target size
        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
                                mode="bilinear", align_corners=False)
        src_masks = src_masks[:, 0].flatten(1)
        target_masks = target_masks.flatten(1)
        target_masks = target_masks.view(src_masks.shape)
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
        }
        return losses
    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx
    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx
    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        # dispatch by loss name; see self.losses for the configured subset
        loss_map = {
            'labels': self.loss_labels,
            'cardinality': self.loss_cardinality,
            'boxes': self.loss_boxes,
            'masks': self.loss_masks
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes accross all nodes, for normalization purposes
        num_boxes = sum(len(t["labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)
        # clamp to 1 so images with no objects don't divide by zero
        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if 'aux_outputs' in outputs:
            for i, aux_outputs in enumerate(outputs['aux_outputs']):
                # each decoder layer gets its own Hungarian matching
                indices = self.matcher(aux_outputs, targets)
                for loss in self.losses:
                    if loss == 'masks':
                        # Intermediate masks losses are too costly to compute, we ignore them.
                        continue
                    kwargs = {}
                    if loss == 'labels':
                        # Logging is enabled only for the last layer
                        kwargs = {'log': False}
                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
                    # suffix loss names with the decoder-layer index
                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
                    losses.update(l_dict)
        return losses
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2
        prob = F.softmax(out_logits, -1)
        # best non-"no-object" class per query (last class is no-object, hence :-1)
        scores, labels = prob[..., :-1].max(-1)
        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        # one result dict per image, as expected by the COCO evaluator
        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
        return results
def build_backbone(args):
    """Assemble the CNN backbone joined with its positional encoding.

    `build_position_encoding`, `Backbone` and `Joiner` are defined elsewhere
    in the project (backbone.py / position_encoding.py) -- TODO confirm.
    """
    position_embedding = build_position_encoding(args)
    # the backbone is trained only when a positive backbone learning rate is given
    train_backbone = args.lr_backbone > 0
    # segmentation needs the intermediate feature maps
    return_interm_layers = args.masks
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model
def build_matcher(args):
    """Create the Hungarian matcher with class / L1-box / GIoU cost weights taken from args."""
    return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
class DETRsegm(nn.Module):
    """Wraps a DETR detector with a mask head for segmentation."""
    def __init__(self, detr, freeze_detr=False):
        super().__init__()
        self.detr = detr
        if freeze_detr:
            # keep the detector weights fixed; only the heads below train
            for p in self.parameters():
                p.requires_grad_(False)
        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
        # MHAttentionMap / MaskHeadSmallConv are defined elsewhere in this module
        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)
        self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
    def forward(self, samples: NestedTensor):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.detr.backbone(samples)
        bs = features[-1].tensors.shape[0]
        src, mask = features[-1].decompose()
        assert mask is not None
        src_proj = self.detr.input_proj(src)
        hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])
        outputs_class = self.detr.class_embed(hs)
        outputs_coord = self.detr.bbox_embed(hs).sigmoid()
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
        if self.detr.aux_loss:
            out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)
        # FIXME h_boxes takes the last one computed, keep this in mind
        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
        # mask head fuses attention maps with the finer backbone feature maps (FPN-style)
        seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])
        outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])
        out["pred_masks"] = outputs_seg_masks
        return out
class PostProcessSegm(nn.Module):
    """Adds binary instance masks (at original image resolution) to detection results."""
    def __init__(self, threshold=0.5):
        super().__init__()
        # sigmoid probability above which a pixel counts as foreground
        self.threshold = threshold
    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
        assert len(orig_target_sizes) == len(max_target_sizes)
        # common padded canvas size for the whole batch
        max_h, max_w = max_target_sizes.max(0)[0].tolist()
        outputs_masks = outputs["pred_masks"].squeeze(2)
        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
            # crop away the padding, then resize to the original image size
            img_h, img_w = t[0], t[1]
            results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
            results[i]["masks"] = F.interpolate(
                results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
            ).byte()
        return results
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API """
    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
           is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
                          the class is a thing (True) or a stuff (False) class
           threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map
    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                             model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                          of each prediction. If left to None, it will default to the processed_sizes
        """
        # NOTE(review): `id2rgb` is used below but is not imported in this snippet;
        # it presumably comes from panopticapi.utils alongside rgb2id -- verify.
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []
        def to_tuple(tup):
            # normalize sizes to plain python tuples (accepts tensors too)
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())
        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            # bring mask logits to the processed image resolution
            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)
            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)
            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image
                # each pixel is assigned to the mask with the highest (softmaxed) logit
                m_id = masks.transpose(0, 1).softmax(-1)
                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    m_id = m_id.argmax(-1).view(h, w)
                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
                final_h, final_w = to_tuple(target_size)
                # encode the id map as an RGB PNG (panoptic API convention), resized to final size
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))
                # per-segment pixel areas measured at the final resolution
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img
            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # We now filter empty masks as long as we find some
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break
            else:
                # no detection at all: emit a single dummy segment
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes
            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
def build_transformer(args):
    """Instantiate the DETR Transformer from command-line args.

    `Transformer` is defined elsewhere (transformer.py). Intermediate decoder
    outputs are always returned so auxiliary losses can be computed.
    """
    return Transformer(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
def build(args):
    """Build the full DETR model, its criterion and the post-processors from args."""
    # the `num_classes` naming here is somewhat misleading.
    # it indeed corresponds to `max_obj_id + 1`, where max_obj_id
    # is the maximum id for a class in your dataset. For example,
    # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
    # As another example, for a dataset that has a single class with id 1,
    # you should pass `num_classes` to be 2 (max_obj_id + 1).
    # For more details on this, check the following discussion
    # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223
    num_classes = 20 if args.dataset_file != 'coco' else 91
    if args.dataset_file == "coco_panoptic":
        # for panoptic, we just add a num_classes that is large enough to hold
        # max_obj_id + 1, but the exact value doesn't really matter
        num_classes = 250
    device = torch.device(args.device)
    backbone = build_backbone(args)
    transformer = build_transformer(args)
    model = DETR(
        backbone,
        transformer,
        num_classes=num_classes,
        num_queries=args.num_queries,
        aux_loss=args.aux_loss,
    )
    if args.masks:
        # segmentation: wrap the detector with the mask head
        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
    matcher = build_matcher(args)
    # per-loss weights used by the training loop when summing the loss dict
    weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    if args.masks:
        weight_dict["loss_mask"] = args.mask_loss_coef
        weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        # replicate the weights for each intermediate decoder layer (suffix `_i`)
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]
    criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
                             eos_coef=args.eos_coef, losses=losses)
    criterion.to(device)
    postprocessors = {'bbox': PostProcess()}
    if args.masks:
        postprocessors['segm'] = PostProcessSegm()
        if args.dataset_file == "coco_panoptic":
            # COCO panoptic convention: ids <= 90 are "thing" classes
            is_thing_map = {i: i <= 90 for i in range(201)}
            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)
    return model, criterion, postprocessors | null |
6,739 | import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) | null |
6,740 | import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss(inputs, targets, num_boxes)` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class).
Here is the function:
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, a soft IoU-style overlap loss for masks.
    Args:
        inputs: float logits of arbitrary shape; sigmoid is applied internally
                and the per-mask dims are flattened.
        targets: float tensor with binary labels (0 negative / 1 positive),
                 already flattened to match the flattened inputs.
        num_boxes: normalizer (the averaged number of target boxes).
    """
    probs = inputs.sigmoid()
    probs = probs.flatten(1)
    intersection = 2 * (probs * targets).sum(1)
    union = probs.sum(-1) + targets.sum(-1)
    # +1 smoothing keeps the loss finite for empty masks
    per_mask = 1 - (intersection + 1) / (union + 1)
    return per_mask.sum() / num_boxes
6,741 | import io
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2)` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples. Default = 0.25; a negative value disables the weighting. gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor
Here is the function:
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        num_boxes: Normalization constant the summed per-example loss is divided by.
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = 0.25; pass a negative
                value (e.g. -1) to disable the weighting.
                (The previous docstring claimed the default was -1, which
                contradicted the signature.)
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    prob = inputs.sigmoid()
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the model's probability assigned to the true class of each element.
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    # Mean over the per-example dimension, then normalize by the box count.
    return loss.mean(1).sum() / num_boxes
6,742 | import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) along the last dim."""
    x_c, y_c, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [x_c - half_w, y_c - half_h, x_c + half_w, y_c + half_h]
    return torch.stack(corners, dim=-1)
6,743 | import torch
from torchvision.ops.boxes import box_area
def box_xyxy_to_cxcywh(x):
    """Convert boxes from (x0, y0, x1, y1) to (cx, cy, w, h) along the last dim."""
    x0, y0, x1, y1 = x.unbind(-1)
    center_x = (x0 + x1) / 2
    center_y = (y0 + y1) / 2
    width = x1 - x0
    height = y1 - y0
    return torch.stack([center_x, center_y, width, height], dim=-1)
6,744 | import torch
from torchvision.ops.boxes import box_area
def box_iou(boxes1, boxes2):
    """Pairwise IoU between two sets of xyxy boxes.

    Returns a tuple ``(iou, union)``, each of shape [N, M] where
    N = len(boxes1) and M = len(boxes2).
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    # Intersection rectangle of every (box1, box2) pair via broadcasting.
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])      # [N,M,2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    extent = (bottom_right - top_left).clamp(min=0)               # [N,M,2]
    inter = extent[..., 0] * extent[..., 1]                       # [N,M]

    union = area1[:, None] + area2 - inter
    return inter / union, union
The provided code snippet includes necessary dependencies for implementing the `generalized_box_iou` function. Write a Python function `def generalized_box_iou(boxes1, boxes2)` to solve the following problem:
Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
Here is the function:
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/
    The boxes should be in [x0, y0, x1, y1] format
    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # Degenerate boxes (x1 < x0 or y1 < y0) would yield inf / nan downstream,
    # so fail fast on malformed input.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)

    # Smallest axis-aligned box enclosing every pair.
    enc_tl = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    enc_br = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    enc_wh = (enc_br - enc_tl).clamp(min=0)  # [N,M,2]
    enc_area = enc_wh[..., 0] * enc_wh[..., 1]

    # GIoU = IoU - (enclosing area not covered by the union) / enclosing area.
    return iou - (enc_area - union) / enc_area
6,745 | import torch
from torchvision.ops.boxes import box_area
The provided code snippet includes necessary dependencies for implementing the `masks_to_boxes` function. Write a Python function `def masks_to_boxes(masks)` to solve the following problem:
Compute the bounding boxes around the provided masks The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensors, with the boxes in xyxy format
Here is the function:
def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks
    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
    Returns a [N, 4] tensors, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        # No masks: empty [0, 4] result on the same device.
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    # Build the coordinate grids on the masks' device: previously they were
    # always created on CPU, which raises a device-mismatch error for CUDA masks.
    y = torch.arange(0, h, dtype=torch.float, device=masks.device)
    x = torch.arange(0, w, dtype=torch.float, device=masks.device)
    y, x = torch.meshgrid(y, x)

    # Max coordinate inside the mask (zeros elsewhere do not affect the max
    # as long as the mask is non-empty).
    x_mask = (masks * x.unsqueeze(0))
    x_max = x_mask.flatten(1).max(-1)[0]
    # For the min, fill pixels outside the mask with a huge value first.
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = (masks * y.unsqueeze(0))
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
6,746 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from packaging import version
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def get_sha():
    """Return a one-line summary of the git state of this source tree.

    Best effort: if git (or the repo) is unavailable, fields fall back to
    'N/A' / 'clean'.
    """
    repo_dir = os.path.dirname(os.path.abspath(__file__))

    def _git(command):
        raw = subprocess.check_output(command, cwd=repo_dir)
        return raw.decode('ascii').strip()

    sha = 'N/A'
    branch = 'N/A'
    diff = "clean"
    try:
        sha = _git(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=repo_dir)
        dirty = _git(['git', 'diff-index', 'HEAD'])
        diff = "has uncommited changes" if dirty else "clean"
        branch = _git(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        # Not a git checkout, or git missing: keep the defaults.
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
6,747 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from packaging import version
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Batch a list of 3-D image tensors (C, H, W) into one NestedTensor.

    Images are zero-padded up to the largest H and W in the list; the
    returned boolean mask is True on padded pixels and False on real ones.
    Raises ValueError for tensors that are not 3-dimensional.
    """
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        # Per-axis maximum over [C, H, W] shapes gives the padded shape.
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        # Start all-True (padding); real pixels are flipped to False below.
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], :img.shape[2]] = False
    else:
        raise ValueError('not supported')
    return NestedTensor(tensor, mask)
def collate_fn(batch):
    """Collate (image, target, ...) samples: pad images into a NestedTensor,
    pass the remaining fields through as tuples."""
    images, *rest = zip(*batch)
    padded = nested_tensor_from_tensor_list(list(images))
    return (padded, *rest)
6,748 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from packaging import version
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def is_main_process():
    # Rank 0 is the main process (presumably get_rank() returns 0 when
    # distributed mode is off — confirm against its definition).
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    """Call torch.save(...) only on the main process so that all ranks do not
    write the same checkpoint file concurrently."""
    if is_main_process():
        torch.save(*args, **kwargs)
6,749 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from packaging import version
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Pass force=True to print from any rank regardless of master status.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    # Monkey-patch the builtin so every module's print() goes through the filter.
    __builtin__.print = print


def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables.

    Supports torchrun/launch-style env vars (RANK/WORLD_SIZE/LOCAL_RANK) and
    SLURM (SLURM_PROCID); otherwise falls back to single-process mode.
    Mutates ``args`` in place: sets rank, world_size, gpu, distributed and
    dist_backend; reads args.dist_url.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        # One process per GPU: map the global rank onto local devices.
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Synchronize all ranks before silencing non-master printing.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
6,750 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from packaging import version
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(output, target, topk=(1,))` to solve the following problem:
Computes the precision@k for the specified values of k
Here is the function:
def accuracy(output, target, topk=(1,)):
    """Compute the top-k accuracy (in percent) for each k in ``topk``.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 0-dim tensors, one per k: the percentage of samples whose
        ground-truth label appears among the top-k predictions.

    Note: the original docstring called this "precision@k", but the quantity
    computed is top-k accuracy.
    """
    if target.numel() == 0:
        # No ground-truth labels: report zero for a single entry.
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()  # (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): view() requires a contiguous layout
        # and is known to raise on this slice with some torch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
6,751 | import torch
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
The provided code snippet includes necessary dependencies for implementing the `plot_logs` function. Write a Python function `def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt')` to solve the following problem:
Function to plot specific fields from training log(s). Plots both training and test results. :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file - fields = which results to plot from each log file - plots both training and test for each field. - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots - log_name = optional, name of log file if different than default 'log.txt'. :: Outputs - matplotlib plots of results in fields, color coded for each log file. - solid lines are training results, dashed lines are test results.
Here is the function:
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
    '''
    Function to plot specific fields from training log(s). Plots both training and test results.
    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
              - fields = which results to plot from each log file - plots both training and test for each field.
              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
              - log_name = optional, name of log file if different than default 'log.txt'.
    :: Outputs - matplotlib plots of results in fields, color coded for each log file.
               - solid lines are training results, dashed lines are test results.
    '''
    func_name = "plot_utils.py::plot_logs"

    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
    # convert single Path to list to avoid 'not iterable' error
    if not isinstance(logs, list):
        if isinstance(logs, PurePath):
            logs = [logs]
            print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
        else:
            raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
            Expect list[Path] or single Path obj, received {type(logs)}")

    # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
    for i, dir in enumerate(logs):
        if not isinstance(dir, PurePath):
            raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
        if not dir.exists():
            raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
        # verify log_name exists
        fn = Path(dir / log_name)
        if not fn.exists():
            # Not an error: the log simply has not been written yet.
            print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
            print(f"--> full path of missing log file: {fn}")
            return

    # load log file(s) and plot
    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]

    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))

    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
        for j, field in enumerate(fields):
            if field == 'mAP':
                # Column 1 of the stacked COCO stats is plotted here —
                # presumably AP@IoU=0.5; confirm against the eval writer.
                coco_eval = pd.DataFrame(
                    np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1]
                ).ewm(com=ewm_col).mean()
                axs[j].plot(coco_eval, c=color)
            else:
                # Solid line = train, dashed = test, same color per log dir.
                df.interpolate().ewm(com=ewm_col).mean().plot(
                    y=[f'train_{field}', f'test_{field}'],
                    ax=axs[j],
                    color=[color] * 2,
                    style=['-', '--']
                )
    for ax, field in zip(axs, fields):
        ax.legend([Path(p).name for p in logs])
        ax.set_title(field)
6,752 | import torch
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_precision_recall(files, naming_scheme='iter'):
    """Plot precision/recall and score/recall curves from saved COCO eval dumps.

    :: Inputs - files = list of Paths to torch-saved COCO evaluation results
              - naming_scheme = 'exp_id' (legend label = grandparent dir name)
                or 'iter' (legend label = file stem).
    :: Outputs - (fig, axs) for the two subplots.
    """
    if naming_scheme == 'exp_id':
        # name becomes exp_id
        names = [f.parts[-3] for f in files]
    elif naming_scheme == 'iter':
        names = [f.stem for f in files]
    else:
        raise ValueError(f'not supported {naming_scheme}')
    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
    for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # files produced by this project's own evaluation code.
        data = torch.load(f)
        # precision is n_iou, n_points, n_cat, n_area, max_det
        precision = data['precision']
        recall = data['params'].recThrs
        scores = data['scores']
        # take precision for all classes, all areas and 100 detections
        precision = precision[0, :, :, 0, -1].mean(1)
        scores = scores[0, :, :, 0, -1].mean(1)
        prec = precision.mean()
        rec = data['recall'][0, :, 0, -1].mean()
        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
              f'score={scores.mean():0.3f}, ' +
              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
              )
        axs[0].plot(recall, precision, c=color)
        axs[1].plot(recall, scores, c=color)
    axs[0].set_title('Precision / Recall')
    axs[0].legend(names)
    axs[1].set_title('Scores / Recall')
    axs[1].legend(names)
    return fig, axs
6,753 | import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Train for one epoch and return the epoch-averaged metrics.

    ``criterion`` must expose a ``weight_dict`` mapping loss names to scalar
    weights; only losses with a configured weight contribute to the optimized
    total. ``max_norm`` > 0 enables gradient clipping to that L2 norm.
    Returns a dict metric name -> global average.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Weighted sum of only the losses that have a configured weight.
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())

        loss_value = losses_reduced_scaled.item()

        if not math.isfinite(loss_value):
            # Abort on NaN/inf loss so a diverged run fails loudly.
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
6,754 | import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
class CocoEvaluator(object):
    """Accumulate per-image predictions and run pycocotools evaluation for one
    or more IoU types ('bbox', 'segm', 'keypoints'), with support for merging
    partial results across distributed processes."""

    def __init__(self, coco_gt, iou_types):
        assert isinstance(iou_types, (list, tuple))
        # Deep-copy so repeated evaluations cannot mutate the caller's GT object.
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt

        self.iou_types = iou_types
        # One COCOeval instance per IoU type, all sharing the same GT.
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)

        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}

    def update(self, predictions):
        """Feed a batch of predictions: dict image_id -> per-image result dict."""
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)

            # suppress pycocotools prints
            with open(os.devnull, 'w') as devnull:
                with contextlib.redirect_stdout(devnull):
                    coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]

            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            img_ids, eval_imgs = evaluate(coco_eval)

            self.eval_imgs[iou_type].append(eval_imgs)

    def synchronize_between_processes(self):
        # Concatenate per-batch eval images along the image axis and merge the
        # results from all distributed processes into each COCOeval object.
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])

    def accumulate(self):
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()

    def summarize(self):
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()

    def prepare(self, predictions, iou_type):
        # Dispatch to the converter that produces COCO-format result dicts.
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))

    def prepare_for_coco_detection(self, predictions):
        """Convert box predictions to COCO result dicts (boxes in xywh)."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results

    def prepare_for_coco_segmentation(self, predictions):
        """Convert mask predictions to COCO result dicts with RLE segmentations."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            scores = prediction["scores"]
            labels = prediction["labels"]
            masks = prediction["masks"]

            # Binarize soft masks before RLE encoding.
            masks = masks > 0.5

            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                # RLE counts come back as bytes; JSON needs str.
                rle["counts"] = rle["counts"].decode("utf-8")

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results

    def prepare_for_coco_keypoint(self, predictions):
        """Convert keypoint predictions to COCO result dicts (flattened keypoints)."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"]
            keypoints = keypoints.flatten(start_dim=1).tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        'keypoints': keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results
class PanopticEvaluator(object):
    """Collect panoptic PNG predictions on disk and compute PQ metrics via
    panopticapi's pq_compute, merging predictions from all processes first."""

    def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
        self.gt_json = ann_file
        self.gt_folder = ann_folder
        # Only the main process creates the output directory (shared filesystem).
        if utils.is_main_process():
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
        self.output_dir = output_dir
        self.predictions = []

    def update(self, predictions):
        # Write each predicted segmentation PNG to disk; keep only the
        # remaining metadata (png_string is popped) in memory.
        for p in predictions:
            with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        self.predictions += predictions

    def synchronize_between_processes(self):
        # Gather every process's metadata list and flatten into one list.
        all_predictions = utils.all_gather(self.predictions)
        merged_predictions = []
        for p in all_predictions:
            merged_predictions += p
        self.predictions = merged_predictions

    def summarize(self):
        # Only the main process writes predictions.json and runs pq_compute;
        # every other rank returns None.
        if utils.is_main_process():
            json_data = {"annotations": self.predictions}
            predictions_json = os.path.join(self.output_dir, "predictions.json")
            with open(predictions_json, "w") as f:
                f.write(json.dumps(json_data))
            return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
        return None
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Run validation: compute losses plus COCO (and optional panoptic) metrics.

    Returns (stats, coco_evaluator) where stats maps metric names to epoch
    averages and includes the COCO summary lists and PQ numbers when enabled.
    """
    model.eval()
    criterion.eval()

    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'

    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]

    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )

    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])

        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if 'segm' in postprocessors.keys():
            target_sizes = torch.stack([t["size"] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)

        if panoptic_evaluator is not None:
            # NOTE(review): target_sizes is only bound inside the 'segm' branch
            # above, so the panoptic path assumes 'segm' is also configured.
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name

            panoptic_evaluator.update(res_pano)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
6,755 | import numpy as np
from lib_layerdiffusion.enums import ResizeMode
from ldm_patched.modules import model_management
import cv2
import torch
def forge_clip_encode(clip, text):
    """Tokenize and encode ``text`` with the given CLIP wrapper.

    Returns the conditioning tensor moved to the active torch device, or
    None when no text is provided.
    """
    if text is None:
        return None
    token_ids = clip.tokenize(text, return_word_ids=True)
    cond, _pooled = clip.encode_from_tokens(token_ids, return_pooled=True)
    target_device = model_management.get_torch_device()
    return cond.to(target_device)
6,756 | import numpy as np
from lib_layerdiffusion.enums import ResizeMode
from ldm_patched.modules import model_management
import cv2
import torch
def rgba2rgbfp32(x):
    """Convert a uint8 RGBA image to float32 RGB in [0, 1], compositing the
    alpha channel over 50% gray."""
    rgb = x[..., :3].astype(np.float32) / 255.0
    alpha = x[..., 3:4].astype(np.float32) / 255.0
    return (rgb - 0.5) * alpha + 0.5
6,757 | import numpy as np
from lib_layerdiffusion.enums import ResizeMode
from ldm_patched.modules import model_management
import cv2
import torch
def to255unit8(x):
    """Map a float image in [0, 1] to uint8 in [0, 255], clipping out-of-range values."""
    scaled = np.clip(x * 255.0, 0, 255)
    return scaled.astype(np.uint8)
6,758 | import numpy as np
from lib_layerdiffusion.enums import ResizeMode
from ldm_patched.modules import model_management
import cv2
import torch
# NOTE(review): the bodies of these helpers appear to have been truncated by
# the snippet extraction — only the signatures survive, which is not valid
# Python as-is. The full implementations live in lib_layerdiffusion.utils /
# lib_layerdiffusion.enums; restore or import them before using this block.
def safe_numpy(x):
def high_quality_resize(x, size):
class ResizeMode(Enum):
def int_value(self):
def crop_and_resize_image(detected_map, resize_mode, h, w):
    """Fit ``detected_map`` (H, W, C) to the target size (h, w).

    RESIZE: plain resize to (w, h).
    RESIZE_AND_FILL: scale to fit inside the target (letterbox) and pad with
    the median border color.
    Otherwise: scale to cover the target and center-crop the overflow.
    """
    if resize_mode == ResizeMode.RESIZE:
        return safe_numpy(high_quality_resize(detected_map, (w, h)))

    src_h, src_w, _ = detected_map.shape
    src_h = float(src_h)
    src_w = float(src_w)
    scale_h = float(h) / src_h
    scale_w = float(w) / src_w

    def _ri(v):
        # Round-to-nearest integer pixel count.
        return int(np.round(v))

    if resize_mode == ResizeMode.RESIZE_AND_FILL:
        # Fit inside the target, then pad with the median border color.
        scale = min(scale_h, scale_w)
        borders = np.concatenate([
            detected_map[0, :, :],
            detected_map[-1, :, :],
            detected_map[:, 0, :],
            detected_map[:, -1, :],
        ], axis=0)
        fill_color = np.median(borders, axis=0).astype(detected_map.dtype)
        canvas = np.tile(fill_color[None, None], [h, w, 1])
        resized = high_quality_resize(detected_map, (_ri(src_w * scale), _ri(src_h * scale)))
        new_h, new_w, _ = resized.shape
        top = max(0, (h - new_h) // 2)
        left = max(0, (w - new_w) // 2)
        canvas[top:top + new_h, left:left + new_w] = resized
        return safe_numpy(canvas)

    # Cover the target, then center-crop the overflow.
    scale = max(scale_h, scale_w)
    resized = high_quality_resize(detected_map, (_ri(src_w * scale), _ri(src_h * scale)))
    new_h, new_w, _ = resized.shape
    top = max(0, (new_h - h) // 2)
    left = max(0, (new_w - w) // 2)
    return safe_numpy(resized[top:top + h, left:left + w])
6,759 | import numpy as np
from lib_layerdiffusion.enums import ResizeMode
from ldm_patched.modules import model_management
import cv2
import torch
def pytorch_to_numpy(x):
    """Convert an iterable of float tensors in [0, 1] to uint8 numpy arrays."""
    out = []
    for t in x:
        arr = 255. * t.cpu().numpy()
        out.append(np.clip(arr, 0, 255).astype(np.uint8))
    return out
6,760 | import numpy as np
from lib_layerdiffusion.enums import ResizeMode
from ldm_patched.modules import model_management
import cv2
import torch
def numpy_to_pytorch(x):
    """Convert a uint8 numpy image to a float32 tensor in [0, 1] with a
    leading batch dimension of 1."""
    batched = x.astype(np.float32)[None] / 255.0
    contiguous = np.ascontiguousarray(batched)
    return torch.from_numpy(contiguous).float()
6,761 | import torch.nn as nn
import torch
import cv2
import numpy as np
from tqdm import tqdm
from typing import Optional, Tuple
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
import ldm_patched.modules.model_management as model_management
from ldm_patched.modules.model_patcher import ModelPatcher
The provided code snippet includes necessary dependencies for implementing the `zero_module` function. Write a Python function `def zero_module(module)` to solve the following problem:
Zero out the parameters of a module and return it.
Here is the function:
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    with torch.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
6,762 | import torch.nn as nn
import torch
import cv2
import numpy as np
from tqdm import tqdm
from typing import Optional, Tuple
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
import ldm_patched.modules.model_management as model_management
from ldm_patched.modules.model_patcher import ModelPatcher
def checkerboard(shape):
    """Return a 0/1 checkerboard array of the given shape (top-left cell is 0)."""
    index_grids = np.indices(shape)
    parity = index_grids.sum(axis=0)
    return parity % 2
6,763 | import gradio as gr
import os
import functools
import torch
import numpy as np
import copy
from modules import scripts
from modules.processing import StableDiffusionProcessing
from lib_layerdiffusion.enums import ResizeMode
from lib_layerdiffusion.utils import rgba2rgbfp32, to255unit8, crop_and_resize_image, forge_clip_encode
from enum import Enum
from modules.paths import models_path
from ldm_patched.modules.utils import load_torch_file
from lib_layerdiffusion.models import TransparentVAEDecoder, TransparentVAEEncoder
from ldm_patched.modules.model_management import current_loaded_models
from modules_forge.forge_sampler import sampling_prepare
from modules.modelloader import load_file_from_url
from lib_layerdiffusion.attention_sharing import AttentionSharingPatcher
from ldm_patched.modules import model_management
def is_model_loaded(model):
    """Return True when ``model`` is the underlying model of any currently
    loaded patcher."""
    for loaded in current_loaded_models:
        if model == loaded.model:
            return True
    return False
6,764 | import gradio as gr
import os
import functools
import torch
import numpy as np
import copy
from modules import scripts
from modules.processing import StableDiffusionProcessing
from lib_layerdiffusion.enums import ResizeMode
from lib_layerdiffusion.utils import rgba2rgbfp32, to255unit8, crop_and_resize_image, forge_clip_encode
from enum import Enum
from modules.paths import models_path
from ldm_patched.modules.utils import load_torch_file
from lib_layerdiffusion.models import TransparentVAEDecoder, TransparentVAEEncoder
from ldm_patched.modules.model_management import current_loaded_models
from modules_forge.forge_sampler import sampling_prepare
from modules.modelloader import load_file_from_url
from lib_layerdiffusion.attention_sharing import AttentionSharingPatcher
from ldm_patched.modules import model_management
def load_layer_model_state_dict(filename):
    """Load a layer-diffusion checkpoint from *filename* with safe loading enabled."""
    state_dict = load_torch_file(filename, safe_load=True)
    return state_dict
6,765 | import asyncio
import re
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import HTMLResponse, FileResponse
from tortoise.contrib.fastapi import register_tortoise
from apps.base.views import share_api
from apps.admin.views import admin_api
from core.settings import data_root, settings, BASE_DIR
from core.tasks import delete_expire_files
from core.utils import max_save_times_desc
# Project root: two directory levels above this module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Global application settings instance shared by the routes below.
settings = Settings()
async def max_save_times_desc(max_save_seconds: int):
    """Build human-readable descriptions of the maximum retention period.

    :param max_save_seconds: maximum save time, in seconds
    :return: tuple of (Chinese description, English description)
    """
    delta = datetime.timedelta(seconds=max_save_seconds)
    # (value, Chinese unit, English unit) for days / hours / minutes / seconds.
    parts = [
        (delta.days, '天', 'day'),
        (delta.seconds // 3600, '小时', 'hour'),
        (delta.seconds % 3600 // 60, '分钟', 'minute'),
        (delta.seconds % 60, '秒', 'second'),
    ]
    desc_zh, desc_en = '最长保存时间:', 'Max save time: '
    for value, zh_unit, en_unit in parts:
        # Zero-valued units are skipped entirely in both languages.
        if value > 0:
            desc_zh += f'{value}{zh_unit}'
            en_piece = f'{value} {en_unit}'
            if value > 1:
                en_piece += 's'
            # Each English piece carries a trailing space, matching the original format.
            desc_en += en_piece + ' '
    return desc_zh, desc_en
async def assets(file_path: str):
    """Serve front-end asset files, patching two JS bundles when retention is capped.

    When a maximum save time is configured, the compiled SPA bundles are rewritten
    on the fly: the "forever" expiry option is stripped and the limit text updated.
    """
    if settings.max_save_seconds > 0:
        # NOTE(review): the '|' inside [\d|a-f|A-F] is a literal character in the
        # class, not alternation; harmless for hex hashes but worth cleaning up.
        if re.match(r'SendView-[\d|a-f|A-F]+\.js', file_path):
            with open(BASE_DIR / f'./fcb-fronted/dist/assets/{file_path}', 'r', encoding='utf-8') as f:
                # Remove the "keep forever" expiry option from the send view.
                content = f.read()
                content = content.replace('_(c,{label:e(r)("send.expireData.forever"),value:"forever"},null,8,["label"]),', '')
                return HTMLResponse(content=content, media_type='text/javascript')
        if re.match(r'index-[\d|a-f|A-F]+\.js', file_path):
            with open(BASE_DIR / f'./fcb-fronted/dist/assets/{file_path}', 'r', encoding='utf-8') as f:
                # Replace the default limit wording with the configured maximum.
                desc_zh, desc_en = await max_save_times_desc(settings.max_save_seconds)
                content = f.read()
                content = content.replace('天数<7', desc_zh)
                content = content.replace('Days <7', desc_en)
                return HTMLResponse(content=content, media_type='text/javascript')
    return FileResponse(f'./fcb-fronted/dist/assets/{file_path}')
6,766 | import asyncio
import re
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import HTMLResponse, FileResponse
from tortoise.contrib.fastapi import register_tortoise
from apps.base.views import share_api
from apps.admin.views import admin_api
from core.settings import data_root, settings, BASE_DIR
from core.tasks import delete_expire_files
from core.utils import max_save_times_desc
async def delete_expire_files():
    """Background loop: purge expired rate-limit records and expired shares forever."""
    while True:
        try:
            # Drop stale entries from both IP rate limiters.
            await error_ip_limit.remove_expired_ip()
            await upload_ip_limit.remove_expired_ip()
            # A share is dead once its deadline has passed or its use count hit zero.
            expired_records = await FileCodes.filter(
                Q(expired_at__lt=await get_now()) | Q(expired_count=0)
            ).all()
            for record in expired_records:
                await record.delete()
        except Exception as exc:
            print(exc)
        finally:
            # Sleep ten minutes between sweeps, success or failure.
            await asyncio.sleep(600)
async def startup_event():
    """On application startup, spawn the periodic expired-file cleanup task."""
    cleanup = delete_expire_files()
    asyncio.create_task(cleanup)
6,767 | import asyncio
import re
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import HTMLResponse, FileResponse
from tortoise.contrib.fastapi import register_tortoise
from apps.base.views import share_api
from apps.admin.views import admin_api
from core.settings import data_root, settings, BASE_DIR
from core.tasks import delete_expire_files
from core.utils import max_save_times_desc
# Project root: two directory levels above this module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Global application settings instance shared by the routes below.
settings = Settings()
async def index():
    """Serve the SPA entry page with site settings substituted into the template.

    Returns the rendered HTML with caching disabled so settings changes
    take effect immediately.
    """
    # Read the template inside a context manager so the file handle is closed
    # deterministically (the original left it to the garbage collector).
    with open(BASE_DIR / './fcb-fronted/dist/index.html', 'r', encoding='utf-8') as f:
        html = f.read()
    html = (
        html
        .replace('{{title}}', str(settings.name))
        .replace('{{description}}', str(settings.description))
        .replace('{{keywords}}', str(settings.keywords))
        .replace('{{opacity}}', str(settings.opacity))
        .replace('{{background}}', str(settings.background))
    )
    return HTMLResponse(content=html, media_type='text/html', headers={'Cache-Control': 'no-cache'})
6,768 | from typing import Union
from fastapi import Header, HTTPException
from fastapi.requests import Request
from core.settings import settings
settings = Settings()
async def admin_required(authorization: Union[str, None] = Header(default=None), request: Request = None):
    """FastAPI dependency: require the admin token, with a laxer rule for /share/ routes."""
    is_admin = str(settings.admin_token) == authorization
    if request.url.path.startswith('/share/'):
        # Guest uploads are allowed only when openUpload is enabled.
        if not (is_admin or settings.openUpload):
            raise HTTPException(status_code=403, detail='本站未开启游客上传,如需上传请先登录后台')
    elif not is_admin:
        raise HTTPException(status_code=401, detail='未授权或授权校验失败')
6,769 | import math
from fastapi import APIRouter, Depends
from apps.admin.depends import admin_required
from apps.admin.pydantics import IDData
from apps.base.models import FileCodes
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
async def login():
    # Authentication happens in the admin_required dependency; reaching this
    # handler means the token was valid, so an empty success envelope suffices.
    return APIResponse()
6,770 | import math
from fastapi import APIRouter, Depends
from apps.admin.depends import admin_required
from apps.admin.pydantics import IDData
from apps.base.models import FileCodes
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
class IDData(BaseModel):
    # Request body carrying a single record primary key.
    id: int
class FileCodes(Model):
    # Share record: either an uploaded file or a text snippet, addressed by `code`.
    id: Optional[int] = fields.IntField(pk=True)
    code: Optional[int] = fields.CharField(description='分享码', max_length=255, index=True, unique=True)
    prefix: Optional[str] = fields.CharField(max_length=255, description='前缀', default='')
    suffix: Optional[str] = fields.CharField(max_length=255, description='后缀', default='')
    uuid_file_name: Optional[str] = fields.CharField(max_length=255, description='uuid文件名', null=True)
    file_path: Optional[str] = fields.CharField(max_length=255, description='文件路径', null=True)
    size: Optional[int] = fields.IntField(description='文件大小', default=0)
    text: Optional[str] = fields.TextField(description='文本内容', null=True)
    expired_at: Optional[datetime] = fields.DatetimeField(null=True, description='过期时间')
    expired_count: Optional[int] = fields.IntField(description='可用次数', default=0)
    used_count: Optional[int] = fields.IntField(description='已用次数', default=0)
    created_at: Optional[datetime] = fields.DatetimeField(auto_now_add=True, description='创建时间')
    async def is_expired(self):
        """Return True when this share can no longer be used."""
        # No deadline at all means the share never expires (the "forever" style).
        if self.expired_at is None:
            return False
        # Time-based share: expired_count < 0 is the sentinel for unlimited uses,
        # so expiry is decided purely by the deadline.
        if self.expired_at and self.expired_count < 0:
            return self.expired_at < await get_now()
        # Count-based share: expired once the remaining use count reaches zero.
        else:
            return self.expired_count <= 0
    async def get_file_path(self):
        """Return the storage-relative path of the uploaded file."""
        return f"{self.file_path}/{self.uuid_file_name}"
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
file_storage: FileStorageInterface = storages[settings.file_storage]()
async def file_delete(data: IDData):
    """Admin endpoint: remove a share record and its stored file."""
    record = await FileCodes.get(id=data.id)
    # Delete the stored blob first, then the database row.
    await file_storage.delete_file(record)
    await record.delete()
    return APIResponse()
6,771 | import math
from fastapi import APIRouter, Depends
from apps.admin.depends import admin_required
from apps.admin.pydantics import IDData
from apps.base.models import FileCodes
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
class FileCodes(Model):
    # Share record: either an uploaded file or a text snippet, addressed by `code`.
    id: Optional[int] = fields.IntField(pk=True)
    code: Optional[int] = fields.CharField(description='分享码', max_length=255, index=True, unique=True)
    prefix: Optional[str] = fields.CharField(max_length=255, description='前缀', default='')
    suffix: Optional[str] = fields.CharField(max_length=255, description='后缀', default='')
    uuid_file_name: Optional[str] = fields.CharField(max_length=255, description='uuid文件名', null=True)
    file_path: Optional[str] = fields.CharField(max_length=255, description='文件路径', null=True)
    size: Optional[int] = fields.IntField(description='文件大小', default=0)
    text: Optional[str] = fields.TextField(description='文本内容', null=True)
    expired_at: Optional[datetime] = fields.DatetimeField(null=True, description='过期时间')
    expired_count: Optional[int] = fields.IntField(description='可用次数', default=0)
    used_count: Optional[int] = fields.IntField(description='已用次数', default=0)
    created_at: Optional[datetime] = fields.DatetimeField(auto_now_add=True, description='创建时间')
    async def is_expired(self):
        """Return True when this share can no longer be used."""
        # No deadline at all means the share never expires (the "forever" style).
        if self.expired_at is None:
            return False
        # Time-based share: expired_count < 0 is the sentinel for unlimited uses,
        # so expiry is decided purely by the deadline.
        if self.expired_at and self.expired_count < 0:
            return self.expired_at < await get_now()
        # Count-based share: expired once the remaining use count reaches zero.
        else:
            return self.expired_count <= 0
    async def get_file_path(self):
        """Return the storage-relative path of the uploaded file."""
        return f"{self.file_path}/{self.uuid_file_name}"
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
async def file_list(page: float = 1, size: int = 10):
    """Admin endpoint: return one page of share records plus the total count."""
    # Non-integer page numbers are rounded up before computing the offset.
    offset = (math.ceil(page) - 1) * size
    rows = await FileCodes.all().limit(size).offset(offset)
    total = await FileCodes.all().count()
    return APIResponse(detail={
        'page': page,
        'size': size,
        'data': rows,
        'total': total,
    })
6,772 | import math
from fastapi import APIRouter, Depends
from apps.admin.depends import admin_required
from apps.admin.pydantics import IDData
from apps.base.models import FileCodes
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
settings = Settings()
async def get_config():
    """Admin endpoint: expose the live settings object as a plain dict payload."""
    return APIResponse(detail=vars(settings))
6,773 | import math
from fastapi import APIRouter, Depends
from apps.admin.depends import admin_required
from apps.admin.pydantics import IDData
from apps.base.models import FileCodes
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
settings = Settings()
async def update_config(data: dict):
    """Admin endpoint: overwrite settings attributes from the submitted mapping."""
    # NOTE(review): keys are applied unfiltered — presumably the admin guard
    # makes this acceptable; confirm no attribute whitelist is needed.
    for key, value in data.items():
        setattr(settings, key, value)
    return APIResponse()
6,774 | from apps.admin.depends import admin_required
from apps.base.models import FileCodes
from apps.base.pydantics import SelectFileModel
from apps.base.utils import get_expire_info, get_file_path_name, error_ip_limit, upload_ip_limit
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
from core.utils import get_select_token
class FileCodes(Model):
    # Share record: either an uploaded file or a text snippet, addressed by `code`.
    id: Optional[int] = fields.IntField(pk=True)
    code: Optional[int] = fields.CharField(description='分享码', max_length=255, index=True, unique=True)
    prefix: Optional[str] = fields.CharField(max_length=255, description='前缀', default='')
    suffix: Optional[str] = fields.CharField(max_length=255, description='后缀', default='')
    uuid_file_name: Optional[str] = fields.CharField(max_length=255, description='uuid文件名', null=True)
    file_path: Optional[str] = fields.CharField(max_length=255, description='文件路径', null=True)
    size: Optional[int] = fields.IntField(description='文件大小', default=0)
    text: Optional[str] = fields.TextField(description='文本内容', null=True)
    expired_at: Optional[datetime] = fields.DatetimeField(null=True, description='过期时间')
    expired_count: Optional[int] = fields.IntField(description='可用次数', default=0)
    used_count: Optional[int] = fields.IntField(description='已用次数', default=0)
    created_at: Optional[datetime] = fields.DatetimeField(auto_now_add=True, description='创建时间')
    async def is_expired(self):
        """Return True when this share can no longer be used."""
        # No deadline at all means the share never expires (the "forever" style).
        if self.expired_at is None:
            return False
        # Time-based share: expired_count < 0 is the sentinel for unlimited uses,
        # so expiry is decided purely by the deadline.
        if self.expired_at and self.expired_count < 0:
            return self.expired_at < await get_now()
        # Count-based share: expired once the remaining use count reaches zero.
        else:
            return self.expired_count <= 0
    async def get_file_path(self):
        """Return the storage-relative path of the uploaded file."""
        return f"{self.file_path}/{self.uuid_file_name}"
async def get_expire_info(expire_value: int, expire_style: str):
    """Resolve expiry parameters for a new share.

    :param expire_value: amount for the chosen time unit, or the allowed use count
    :param expire_style: one of 'day', 'hour', 'minute', 'count', 'forever'
    :return: (expired_at deadline, expired_count uses (-1 = unlimited), used_count, code)
    """
    expired_count, used_count, now, code = -1, 0, datetime.datetime.now(), None
    # Work out the retention ceiling and the message shown when it is exceeded.
    if settings.max_save_seconds > 0:
        max_timedelta = datetime.timedelta(seconds=settings.max_save_seconds)
        detail = await max_save_times_desc(settings.max_save_seconds)
        detail = f'保存时间超过限制,{detail[0]}'
    else:
        max_timedelta = datetime.timedelta(days=7)
        detail = '保存时间超过限制,最长保存时间:7天'
    time_units = {'day': 'days', 'hour': 'hours', 'minute': 'minutes'}
    if expire_style in time_units:
        requested = datetime.timedelta(**{time_units[expire_style]: expire_value})
        if requested > max_timedelta:
            raise HTTPException(status_code=403, detail=detail)
        expired_at = now + requested
    elif expire_style == 'count':
        # Count-limited shares still expire after one day as a safety net.
        expired_at = now + datetime.timedelta(days=1)
        expired_count = expire_value
    elif expire_style == 'forever':
        # Permanent shares get a harder-to-guess string code.
        expired_at = None
        code = await get_random_code(style='string')
    else:
        # Unknown styles fall back to a one-day share.
        expired_at = now + datetime.timedelta(days=1)
    if not code:
        code = await get_random_code()
    return expired_at, expired_count, used_count, code
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
async def share_text(text: str = Form(...), expire_value: int = Form(default=1, gt=0), expire_style: str = Form(default='day'), ip: str = Depends(upload_ip_limit)):
    """Create a text share and return its access code."""
    # Resolve expiry parameters (deadline, use count, generated code)
    expired_at, expired_count, used_count, code = await get_expire_info(expire_value, expire_style)
    # Create a new FileCodes record holding the text itself
    await FileCodes.create(
        code=code,
        text=text,
        expired_at=expired_at,
        expired_count=expired_count,
        used_count=used_count,
        size=len(text),
        prefix='文本分享'
    )
    # Record this IP against the upload rate limit
    upload_ip_limit.add_ip(ip)
    # Return the share code to the caller
    return APIResponse(detail={
        'code': code,
    })
6,775 | from apps.admin.depends import admin_required
from apps.base.models import FileCodes
from apps.base.pydantics import SelectFileModel
from apps.base.utils import get_expire_info, get_file_path_name, error_ip_limit, upload_ip_limit
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
from core.utils import get_select_token
class FileCodes(Model):
    # Share record: either an uploaded file or a text snippet, addressed by `code`.
    id: Optional[int] = fields.IntField(pk=True)
    code: Optional[int] = fields.CharField(description='分享码', max_length=255, index=True, unique=True)
    prefix: Optional[str] = fields.CharField(max_length=255, description='前缀', default='')
    suffix: Optional[str] = fields.CharField(max_length=255, description='后缀', default='')
    uuid_file_name: Optional[str] = fields.CharField(max_length=255, description='uuid文件名', null=True)
    file_path: Optional[str] = fields.CharField(max_length=255, description='文件路径', null=True)
    size: Optional[int] = fields.IntField(description='文件大小', default=0)
    text: Optional[str] = fields.TextField(description='文本内容', null=True)
    expired_at: Optional[datetime] = fields.DatetimeField(null=True, description='过期时间')
    expired_count: Optional[int] = fields.IntField(description='可用次数', default=0)
    used_count: Optional[int] = fields.IntField(description='已用次数', default=0)
    created_at: Optional[datetime] = fields.DatetimeField(auto_now_add=True, description='创建时间')
    async def is_expired(self):
        """Return True when this share can no longer be used."""
        # No deadline at all means the share never expires (the "forever" style).
        if self.expired_at is None:
            return False
        # Time-based share: expired_count < 0 is the sentinel for unlimited uses,
        # so expiry is decided purely by the deadline.
        if self.expired_at and self.expired_count < 0:
            return self.expired_at < await get_now()
        # Count-based share: expired once the remaining use count reaches zero.
        else:
            return self.expired_count <= 0
    async def get_file_path(self):
        """Return the storage-relative path of the uploaded file."""
        return f"{self.file_path}/{self.uuid_file_name}"
async def get_file_path_name(file: UploadFile):
    """Derive storage path pieces for an uploaded file.

    Returns (path, suffix, prefix, uuid_file_name, save_path), e.g.
    ('share/data/2021/08/13', '.jpg', 'test',
     '44a83bbd70e04c8aa7fd93bfd8c88249.jpg',
     'share/data/2021/08/13/44a83bbd70e04c8aa7fd93bfd8c88249.jpg')
    """
    now = datetime.datetime.now()
    # Uploads are bucketed into per-day directories.
    path = f"share/data/{now.strftime('%Y/%m/%d')}"
    prefix, suffix = os.path.splitext(file.filename)
    # A random hex name avoids collisions and hides the original filename.
    uuid_file_name = f"{uuid.uuid4().hex}{suffix}"
    save_path = f"{path}/{uuid_file_name}"
    return path, suffix, prefix, uuid_file_name, save_path
async def get_expire_info(expire_value: int, expire_style: str):
    """Resolve expiry parameters for a new share.

    :param expire_value: amount for the chosen time unit, or the allowed use count
    :param expire_style: one of 'day', 'hour', 'minute', 'count', 'forever'
    :return: (expired_at deadline, expired_count uses (-1 = unlimited), used_count, code)
    """
    # -1 uses means "unlimited" (time-based); code stays None unless 'forever' picks one early.
    expired_count, used_count, now, code = -1, 0, datetime.datetime.now(), None
    # Work out the retention ceiling and the message shown when it is exceeded.
    if settings.max_save_seconds > 0:
        max_timedelta = datetime.timedelta(seconds=settings.max_save_seconds)
        detail = await max_save_times_desc(settings.max_save_seconds)
        detail = f'保存时间超过限制,{detail[0]}'
    else:
        max_timedelta = datetime.timedelta(days=7)
        detail = '保存时间超过限制,最长保存时间:7天'
    if expire_style == 'day':
        if datetime.timedelta(days=expire_value) > max_timedelta:
            raise HTTPException(status_code=403, detail=detail)
        expired_at = now + datetime.timedelta(days=expire_value)
    elif expire_style == 'hour':
        if datetime.timedelta(hours=expire_value) > max_timedelta:
            raise HTTPException(status_code=403, detail=detail)
        expired_at = now + datetime.timedelta(hours=expire_value)
    elif expire_style == 'minute':
        if datetime.timedelta(minutes=expire_value) > max_timedelta:
            raise HTTPException(status_code=403, detail=detail)
        expired_at = now + datetime.timedelta(minutes=expire_value)
    elif expire_style == 'count':
        # Count-limited shares still expire after one day as a safety net.
        expired_at = now + datetime.timedelta(days=1)
        expired_count = expire_value
    elif expire_style == 'forever':
        # Permanent shares get a harder-to-guess string code.
        expired_at = None
        code = await get_random_code(style='string')
    else:
        # Unknown styles fall back to a one-day share.
        expired_at = now + datetime.timedelta(days=1)
    if not code:
        code = await get_random_code()
    return expired_at, expired_count, used_count, code
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
# Global application settings instance.
settings = Settings()
# Instantiate the storage backend named by settings.file_storage.
file_storage: FileStorageInterface = storages[settings.file_storage]()
async def share_file(expire_value: int = Form(default=1, gt=0), expire_style: str = Form(default='day'), file: UploadFile = File(...), ip: str = Depends(upload_ip_limit)):
    """Upload a file share and return its access code and original name."""
    # Reject files larger than the configured limit
    if file.size > settings.uploadSize:
        raise HTTPException(status_code=403, detail=f'文件大小超过限制,最大为{settings.uploadSize}字节')
    # Resolve expiry parameters (deadline, use count, generated code)
    expired_at, expired_count, used_count, code = await get_expire_info(expire_value, expire_style)
    # Derive the storage path and randomized filename
    path, suffix, prefix, uuid_file_name, save_path = await get_file_path_name(file)
    # Persist the uploaded bytes to the storage backend
    await file_storage.save_file(file, save_path)
    # Create a new FileCodes record pointing at the stored file
    await FileCodes.create(
        code=code,
        prefix=prefix,
        suffix=suffix,
        uuid_file_name=uuid_file_name,
        file_path=path,
        size=file.size,
        expired_at=expired_at,
        expired_count=expired_count,
        used_count=used_count,
    )
    # Record this IP against the upload rate limit
    upload_ip_limit.add_ip(ip)
    # Return the share code and the original filename
    return APIResponse(detail={
        'code': code,
        'name': file.filename,
    })
6,776 | from apps.admin.depends import admin_required
from apps.base.models import FileCodes
from apps.base.pydantics import SelectFileModel
from apps.base.utils import get_expire_info, get_file_path_name, error_ip_limit, upload_ip_limit
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
from core.utils import get_select_token
async def get_code_file_by_code(code, check=True):
    file_code = await FileCodes.filter(code=code).first()
    if not file_code:
        return False, '文件不存在'
    if await file_code.is_expired() and check:
        return False, '文件已过期'
    return True, file_code
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
file_storage: FileStorageInterface = storages[settings.file_storage]()
async def get_code_file(code: str, ip: str = Depends(error_ip_limit)):
    """Fetch a share by its code and stream its file back."""
    # Look up the share record (with the expiry check enabled)
    has, file_code = await get_code_file_by_code(code)
    # Unknown or expired code
    if not has:
        # Count this miss against the caller's IP (anti-brute-force)
        error_ip_limit.add_ip(ip)
        # On failure file_code carries the error message, not a record
        return APIResponse(code=404, detail=file_code)
    # Update usage counters (expired_count counts down remaining uses)
    file_code.used_count += 1
    file_code.expired_count -= 1
    # Persist the updated counters
    await file_code.save()
    # Stream the stored file back to the caller
    return await file_storage.get_file_response(file_code)
6,777 | from apps.admin.depends import admin_required
from apps.base.models import FileCodes
from apps.base.pydantics import SelectFileModel
from apps.base.utils import get_expire_info, get_file_path_name, error_ip_limit, upload_ip_limit
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
from core.utils import get_select_token
async def get_code_file_by_code(code, check=True):
    file_code = await FileCodes.filter(code=code).first()
    if not file_code:
        return False, '文件不存在'
    if await file_code.is_expired() and check:
        return False, '文件已过期'
    return True, file_code
class SelectFileModel(BaseModel):
    # Request body carrying the share code to look up.
    code: str
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
file_storage: FileStorageInterface = storages[settings.file_storage]()
async def select_file(data: SelectFileModel, ip: str = Depends(error_ip_limit)):
    """Resolve a share code to its metadata (and text content or download URL)."""
    # Look up the share record (with the expiry check enabled)
    has, file_code = await get_code_file_by_code(data.code)
    # Unknown or expired code
    if not has:
        # Count this miss against the caller's IP (anti-brute-force)
        error_ip_limit.add_ip(ip)
        # On failure file_code carries the error message, not a record
        return APIResponse(code=404, detail=file_code)
    # Update usage counters (expired_count counts down remaining uses)
    file_code.used_count += 1
    file_code.expired_count -= 1
    # Persist the updated counters
    await file_code.save()
    # For text shares return the text; for file shares return a download URL
    return APIResponse(detail={
        'code': file_code.code,
        'name': file_code.prefix + file_code.suffix,
        'size': file_code.size,
        'text': file_code.text if file_code.text is not None else await file_storage.get_file_url(file_code),
    })
6,778 | from apps.admin.depends import admin_required
from apps.base.models import FileCodes
from apps.base.pydantics import SelectFileModel
from apps.base.utils import get_expire_info, get_file_path_name, error_ip_limit, upload_ip_limit
from core.response import APIResponse
from core.settings import settings
from core.storage import file_storage
from core.utils import get_select_token
async def get_code_file_by_code(code, check=True):
    file_code = await FileCodes.filter(code=code).first()
    if not file_code:
        return False, '文件不存在'
    if await file_code.is_expired() and check:
        return False, '文件已过期'
    return True, file_code
class APIResponse(GenericModel, Generic[T]):
    # Standard API envelope: business status code, message, and typed payload.
    code: int = 200
    message: str = 'ok'
    detail: T
file_storage: FileStorageInterface = storages[settings.file_storage]()
async def get_select_token(code: str):
    """Derive a time-bucketed download token for *code*.

    The token is a SHA-256 hex digest over the code, a coarse time bucket and a
    shared secret, so it rolls over when the bucket changes.

    :param code: share code the token authorizes
    :return: 64-character lowercase hex digest
    """
    secret = "123456"  # NOTE(review): hard-coded shared secret — consider moving to settings
    bucket = int(time.time() / 1000)
    payload = f"{code}{bucket}000{secret}"
    return hashlib.sha256(payload.encode()).hexdigest()
async def download_file(key: str, code: str, ip: str = Depends(error_ip_limit)):
    """Serve a share's content given a download token *key* and share *code*."""
    # Validate the time-bucketed download token
    is_valid = await get_select_token(code) == key
    if not is_valid:
        # Penalise the caller's IP in the error rate limiter
        error_ip_limit.add_ip(ip)
        # NOTE(review): execution continues even when the token is invalid, so
        # the download is still served — confirm whether a 403 should be raised.
    # Fetch the record without the expiry check (check=False)
    has, file_code = await get_code_file_by_code(code, False)
    # Unknown code
    if not has:
        return APIResponse(code=404, detail='文件不存在')
    # For text shares return the text; otherwise stream the stored file
    if file_code.text:
        return APIResponse(detail=file_code.text)
    else:
        return await file_storage.get_file_response(file_code)
6,779 | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from docutils import nodes
from docutils.parsers.rst import Directive, directives
The provided code snippet includes necessary dependencies for implementing the `get_option` function. Write a Python function `def get_option(options: dict[str, Any], key: str, default: Any) -> Any` to solve the following problem:
Get an option.
Here is the function:
def get_option(options: dict[str, Any], key: str, default: Any) -> Any:
    """Get an option, falling back to *default* when the key is absent.

    For boolean defaults (docutils flag options) the mere presence of the key
    means the flag is set, so True is returned regardless of the stored value.
    """
    if key in options:
        return True if isinstance(default, bool) else options[key]
    return default
6,780 | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from docutils import nodes
from docutils.parsers.rst import Directive, directives
class video(nodes.General, nodes.Element):
    """A video node."""
    # Custom doctree node; attributes (path, alt, width, height, autoplay,
    # nocontrols) are set by the Video directive and read by visit_video_node.
class Video(Directive):
    """A docutils video directive."""
    has_content = True
    required_arguments = 1
    optional_arguments = 5
    final_argument_whitespace = False
    # NOTE(review): option_spec values are option-conversion callables
    # (directives.unchanged, directives.flag), so the `Directive` value type in
    # this annotation looks wrong — confirm and correct upstream.
    option_spec: ClassVar[
        dict[
            str,
            Directive,
        ]
    ] = {
        "alt": directives.unchanged,
        "width": directives.unchanged,
        "height": directives.unchanged,
        "autoplay": directives.flag,
        "nocontrols": directives.flag,
    }
    def run(self) -> list[video]:
        """Return the nodes generated from this directive."""
        # Resolve each option with its default; flag options become booleans.
        alt = get_option(self.options, "alt", "Video")
        width = get_option(self.options, "width", "")
        height = get_option(self.options, "height", "")
        autoplay = get_option(self.options, "autoplay", False)
        nocontrols = get_option(self.options, "nocontrols", False)
        return [
            video(
                path=self.arguments[0],
                alt=alt,
                width=width,
                height=height,
                autoplay=autoplay,
                nocontrols=nocontrols,
            )
        ]
def visit_video_node(self: HTMLTranslator, node: video) -> None:
    """Append an HTML5 <video> block when the video node is visited."""
    # The file extension (without the dot) doubles as the MIME subtype.
    extension = Path(node["path"]).suffix[1:]
    html_block = """
    <video {width} {height} {nocontrols} {autoplay} preload="metadata">
        <source src="{path}" type="video/{filetype}">
        {alt}
    </video>
    """.format(
        width='width="' + node["width"] + '"' if node["width"] else "",
        height='height="' + node["height"] + '"' if node["height"] else "",
        path=node["path"],
        filetype=extension,
        alt=node["alt"],
        autoplay="autoplay" if node["autoplay"] else "",
        # The "nocontrols" flag suppresses the browser's default controls.
        nocontrols="" if node["nocontrols"] else "controls",
    )
    self.body.append(html_block)
def depart_video_node(self: HTMLTranslator, node: video) -> None:
    """Do nothing when departing a video node."""
    # The visit handler emits the complete block, so no closing markup is needed.
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(app: Sphinx) -> None` to solve the following problem:
Register this extension with Sphinx.
Here is the function:
def setup(app: Sphinx) -> None:
    """Register this extension with Sphinx."""
    app.add_node(
        video,
        html=(visit_video_node, depart_video_node),
        # Do nothing for text & latex output - they do not support video
        latex=(depart_video_node, depart_video_node),
        text=(depart_video_node, depart_video_node),
    )
    app.add_directive("video", Video)
6,781 | from __future__ import annotations
from prompt_toolkit.filters import Condition
from euporie.core.current import get_app
def get_app() -> BaseApp:
    """Return the active application, or a fresh BaseApp as a null object."""
    from euporie.core.app import BaseApp

    app = _current_app_session.get().app
    if isinstance(app, BaseApp):
        return app
    # No euporie app is running; a BaseApp stands in as a "DummyApplication".
    return BaseApp()
The provided code snippet includes necessary dependencies for implementing the `notebook_has_focus` function. Write a Python function `def notebook_has_focus() -> bool` to solve the following problem:
Determine if there is a currently focused notebook.
Here is the function:
def notebook_has_focus() -> bool:
    """Determine if there is a currently focused notebook."""
    from euporie.notebook.tabs.notebook import Notebook

    current_tab = get_app().tab
    return isinstance(current_tab, Notebook)
6,782 | from __future__ import annotations
from prompt_toolkit.filters import Condition
from euporie.core.current import get_app
def get_app() -> BaseApp:
    """Return the active application, or a fresh BaseApp as a null object."""
    from euporie.core.app import BaseApp

    app = _current_app_session.get().app
    if isinstance(app, BaseApp):
        return app
    # No euporie app is running; a BaseApp stands in as a "DummyApplication".
    return BaseApp()
The provided code snippet includes necessary dependencies for implementing the `deleted_cells` function. Write a Python function `def deleted_cells() -> bool` to solve the following problem:
Determine if there are cells in the undo buffer.
Here is the function:
def deleted_cells() -> bool:
    """Determine whether there are cells in the undo buffer.

    Returns False when no notebook tab is focused.
    """
    from euporie.notebook.tabs.notebook import Notebook

    tab = get_app().tab
    if isinstance(tab, Notebook):
        # A non-empty undo buffer means at least one cell deletion can be undone.
        return bool(tab.undo_buffer)
    return False
6,783 | from __future__ import annotations
from prompt_toolkit.filters import Condition
from euporie.core.current import get_app
def get_app() -> BaseApp:
    """Return the active application, or a fresh BaseApp as a null object."""
    from euporie.core.app import BaseApp

    app = _current_app_session.get().app
    if isinstance(app, BaseApp):
        return app
    # No euporie app is running; a BaseApp stands in as a "DummyApplication".
    return BaseApp()
The provided code snippet includes necessary dependencies for implementing the `code_cell_selected` function. Write a Python function `def code_cell_selected() -> bool` to solve the following problem:
Determine if a code cell is selected.
Here is the function:
def code_cell_selected() -> bool:
    """Determine if a code cell is selected."""
    from euporie.notebook.tabs.notebook import Notebook

    tab = get_app().tab
    if not isinstance(tab, Notebook):
        return False
    # NOTE(review): assumes Notebook.cells yields the *selected* cells — confirm.
    return any(cell.cell_type == "code" for cell in tab.cells)
6,784 | from __future__ import annotations
from prompt_toolkit.filters import Condition
from euporie.core.current import get_app
def get_app() -> BaseApp:
    """Return the active application, or a fresh BaseApp as a null object."""
    from euporie.core.app import BaseApp

    app = _current_app_session.get().app
    if isinstance(app, BaseApp):
        return app
    # No euporie app is running; a BaseApp stands in as a "DummyApplication".
    return BaseApp()
The provided code snippet includes necessary dependencies for implementing the `cell_has_focus` function. Write a Python function `def cell_has_focus() -> bool` to solve the following problem:
Determine if there is a currently focused cell.
Here is the function:
def cell_has_focus() -> bool:
    """Determine if there is a currently focused cell."""
    from euporie.notebook.tabs.notebook import Notebook

    tab = get_app().tab
    # Only a notebook tab can have a focused cell.
    return isinstance(tab, Notebook) and tab.cell is not None
6,785 | from __future__ import annotations
from prompt_toolkit.filters import Condition
from euporie.core.current import get_app
def get_app() -> BaseApp:
    """Return the active application, or a fresh BaseApp as a null object."""
    from euporie.core.app import BaseApp

    app = _current_app_session.get().app
    if isinstance(app, BaseApp):
        return app
    # No euporie app is running; a BaseApp stands in as a "DummyApplication".
    return BaseApp()
The provided code snippet includes necessary dependencies for implementing the `in_edit_mode` function. Write a Python function `def in_edit_mode() -> bool` to solve the following problem:
Determine whether the currently focused notebook is in edit mode.
Here is the function:
def in_edit_mode() -> bool:
    """Determine whether the focused notebook is in edit mode.

    (The previous docstring wrongly described this as a focus check.)
    Returns False when no notebook tab is focused.
    """
    from euporie.notebook.tabs.notebook import Notebook

    tab = get_app().tab
    if isinstance(tab, Notebook):
        return tab.edit_mode
    return False
6,786 | from __future__ import annotations
import io
import logging
import os
import sys
from functools import partial
from typing import TYPE_CHECKING, cast
from prompt_toolkit.layout.containers import DynamicContainer, FloatContainer, Window
from prompt_toolkit.output.defaults import create_output
from prompt_toolkit.output.vt100 import Vt100_Output
from upath import UPath
from euporie.core.app import BaseApp, get_app
from euporie.core.config import add_setting
from euporie.core.io import PseudoTTY
from euporie.core.key_binding.registry import register_bindings
from euporie.preview.tabs.notebook import PreviewNotebook
class PreviewApp(BaseApp):
"""Preview app.
Preview notebook files in the terminal.
Outputs a formatted notebook file. The formatted output will be written to
the the output file path given by `output_file` (the standard output by
default).
"""
name = "preview"
def __init__(self, **kwargs: Any) -> None:
"""Create an app for dumping a prompt-toolkit layout."""
# Set default arguments
kwargs.setdefault("title", "euporie-preview")
kwargs.setdefault("leave_graphics", True)
kwargs.setdefault("full_screen", False)
kwargs.setdefault("max_render_postpone_time", 0)
kwargs.setdefault("min_redraw_interval", 0)
kwargs.setdefault("extend_renderer_height", True)
# Adjust options if we are paging output
if self.config.page:
kwargs.setdefault("set_title", False)
kwargs.setdefault("extend_renderer_width", True)
# Initialise the application
super().__init__(**kwargs)
# We want the app to close when rendering is complete
# self.after_render += self.pre_exit
# Do not load any key bindings
self.bindings_to_load.append("euporie.preview.app.PreviewApp")
# Select the first tab after files are opened
self.pre_run_callables.append(partial(setattr, self, "tab_idx", 0))
def get_file_tab(self, path: Path) -> type[Tab]:
"""Return the tab to use for a file path."""
return PreviewNotebook
def exit(
self,
result: _AppResult | None = None,
exception: BaseException | type[BaseException] | None = None,
style: str = "",
) -> None:
"""Optionally pipe the output to a pager on exit."""
# Display pager if needed
if self.config.page:
from pydoc import pager
output_file = getattr(self.output, "output_file") # noqa: B009
if output_file is not None:
output_file.seek(0)
data = output_file.read()
pager(data)
if exception is not None:
super().exit(exception=exception, style=style)
elif result is not None:
super().exit(result=result, style=style)
else:
super().exit()
def load_container(self) -> FloatContainer:
"""Return a container with all opened tabs."""
return FloatContainer(
DynamicContainer(lambda: self.tab or Window()),
floats=cast("list[Float]", self.floats),
)
def cleanup_closed_tab(self, tab: Tab) -> None:
"""Exit if all tabs are closed."""
super().cleanup_closed_tab(tab)
if not self.tabs:
self._is_running = False
self.exit()
self.draw(render_as_done=True)
def load_output(cls) -> Output:
"""Load the output.
Depending on the application configuration, will set the output to a file, to
stdout, or to a temporary file so the output can be displayed in a pager.
Returns:
A container for notebook output
"""
output_file: TextIO
if cls.config.page:
# Use a temporary file as display output if we are going to page the output
from tempfile import TemporaryFile
output_file = TemporaryFile("w+")
# Make this file look like a tty so we get colorful output
output_file = cast("TextIO", PseudoTTY(output_file, isatty=True))
else:
# If we are not paging output, determine where to print it
if cls.config.output_file is None or str(cls.config.output_file) in (
"-",
"/dev/stdout",
):
output_file = sys.stdout
elif str(cls.config.output_file) == "/dev/stderr":
output_file = sys.stderr
else:
try:
output_file = cls.config.output_file.open("w+")
except (
FileNotFoundError,
PermissionError,
io.UnsupportedOperation,
) as error:
log.error(error)
log.error(
"Output file `%s` cannot be opened. "
"Standard output will be used.",
cls.config.output_file,
)
output_file = sys.stdout
# Make the output look like a TTY if color-depth has been configured
if not output_file.isatty() and cls.config.color_depth is not None:
output_file = cast(
"TextIO",
PseudoTTY(
output_file,
isatty=True,
),
)
# Ensure we do not receive the "Output is not a terminal" message
Vt100_Output._fds_not_a_terminal.add(output_file.fileno())
# Set environment variable to disable character position requests
os.environ["PROMPT_TOOLKIT_NO_CPR"] = "1"
# Create a default output - this detects the terminal type
# Do not use stderr instead of stdout if stdout is not a tty
output = create_output(cast("TextIO", output_file), always_prefer_tty=False)
# Use the width and height of stderr (this gives us the terminal size even if
# output is being piped to a non-tty)
setattr( # noqa B010
output, "get_size", create_output(stdout=sys.stderr).get_size
)
# Attach the output file to the output in case we need to page it
setattr(output, "output_file", output_file) # noqa B010
return output
def _redraw(self, render_as_done: bool = False) -> None:
"""Ensure the output is always rendered as done."""
# import time
# time.sleep(0.1)
super()._redraw(render_as_done=True)
# ################################### Settings ####################################
add_setting(
name="output_file",
flags=["--output-file"],
nargs="?",
default="-",
const="-",
type_=UPath,
help_="Output path when previewing file",
description="""
When set to a file path, the formatted output will be written to the
given path. If no value is given (or the default "-" is passed) output
will be printed to standard output.
""",
)
add_setting(
name="page",
flags=["--page"],
type_=bool,
help_="Pass output to pager",
default=False,
description="""
Whether to pipe output to the system pager when previewing a notebook.
""",
)
# ################################# Key Bindings ##################################
register_bindings(
{
"euporie.preview.app.PreviewApp": {
"quit": ["c-c", "c-q"],
}
}
)
The provided code snippet includes necessary dependencies for implementing the `get_preview_app` function. Write a Python function `def get_preview_app() -> PreviewApp` to solve the following problem:
Get the current application.
Here is the function:
def get_preview_app() -> PreviewApp:
    """Return the current application, cast to a :class:`PreviewApp`."""
    app = get_app()
    return cast("PreviewApp", app)
6,787 | from __future__ import annotations
import contextvars
from itertools import chain
from threading import Thread
from typing import TYPE_CHECKING, Sequence, TypeVar, overload
from prompt_toolkit.mouse_events import MouseButton, MouseEventType
The provided code snippet includes necessary dependencies for implementing the `on_click` function. Write a Python function `def on_click(func: Callable) -> MouseHandler` to solve the following problem:
Return a mouse handler which calls a given function on click.
Here is the function:
def on_click(func: Callable) -> MouseHandler:
    """Return a mouse handler which calls a given function on click."""

    def _handler(mouse_event: MouseEvent) -> NotImplementedOrNone:
        # Only react to a completed left-button click
        is_left_click = (
            mouse_event.button == MouseButton.LEFT
            and mouse_event.event_type == MouseEventType.MOUSE_UP
        )
        if is_left_click:
            return func()
        return NotImplemented

    return _handler
6,788 | from __future__ import annotations
import contextvars
from itertools import chain
from threading import Thread
from typing import TYPE_CHECKING, Sequence, TypeVar, overload
from prompt_toolkit.mouse_events import MouseButton, MouseEventType
The provided code snippet includes necessary dependencies for implementing the `run_in_thread_with_context` function. Write a Python function `def run_in_thread_with_context( func: Callable, *args: Any, daemon: bool = True, **kwargs: Any ) -> None` to solve the following problem:
Run a function in a thread, but make sure it uses the same contextvars. This is required so that the function will see the right application.
Here is the function:
def run_in_thread_with_context(
    func: Callable, *args: Any, daemon: bool = True, **kwargs: Any
) -> None:
    """Run a function in a thread, propagating the current context.

    The current :py:mod:`contextvars` context is copied into the new thread so
    that the function will see the right application.

    Args:
        func: The callable to run in the background thread.
        *args: Positional arguments passed through to ``func``.
        daemon: Whether the thread should be a daemon thread.
        **kwargs: Keyword arguments passed through to ``func``.
    """
    ctx = contextvars.copy_context()
    thread = Thread(
        target=ctx.run,
        args=(func, *args),
        kwargs=kwargs,
        daemon=daemon,
    )
    thread.start()
6,789 | from __future__ import annotations
import logging
import mimetypes
from functools import lru_cache
from typing import TYPE_CHECKING
from upath import UPath
from upath._stat import UPathStatResult
from upath.implementations.http import HTTPPath
MIME_FORMATS = {
"image/svg+xml": "svg",
"image/png": "png",
"image/jpeg": "jpeg",
"image/gif": "gif",
"application/pdf": "pdf",
"text/html": "html",
"text/latex": "latex",
"application/x-latex": "latex",
"text/markdown": "markdown",
"text/x-markdown": "markdown",
"text/*": "ansi",
"stream/std*": "ansi",
"*": "ansi",
}
def get_mime(path: Path | str) -> str | None:
    """Attempt to determine the mime-type of a path."""
    if isinstance(path, str):
        path = UPath(path)
    try:
        path = path.resolve()
    except Exception:
        log.debug("Cannot resolve '%s'", path)

    mime = None

    # Use the mime-type recorded in the path's stat info, when available
    if path.exists():
        stat_result = path.stat()
        if isinstance(stat_result, UPathStatResult):
            mime = stat_result.as_info().get("mimetype")

    # For web addresses, consult the HTTP Content-Type header
    if not mime and isinstance(path, HTTPPath) and path._url is not None:
        from fsspec.asyn import sync

        url = path._url.geturl()
        fs = path.fs
        # Ensure we have a session
        session = sync(fs.loop, fs.set_session)
        # Prefer HEAD requests, falling back to GET if the server refuses them
        for request in (session.head, session.get):
            response = sync(fs.loop, request, url, allow_redirects=True)
            try:
                response.raise_for_status()
            except Exception:
                log.debug("Request failed: %s", response)
                continue
            content_type = response.headers.get("Content-Type")
            if content_type is not None:
                mime = content_type.partition(";")[0]
                break

    # Sniff the file contents with python-magic, if installed
    if not mime:
        try:
            import magic
        except ModuleNotFoundError:
            pass
        else:
            try:
                with path.open(mode="rb") as f:
                    mime = magic.from_buffer(f.read(2048), mime=True)
            except FileNotFoundError:
                pass

    # Finally, fall back to guessing from the file extension
    if not mime and path.suffix:
        if path.suffix == ".ipynb":
            # Jupyter notebooks are identified by extension alone
            return "application/x-ipynb+json"
        mime, _ = mimetypes.guess_type(path)

    return mime
The provided code snippet includes necessary dependencies for implementing the `get_format` function. Write a Python function `def get_format(path: Path | str, default: str = "") -> str` to solve the following problem:
Attempt to guess the format of a path.
Here is the function:
def get_format(path: Path | str, default: str = "") -> str:
    """Attempt to guess the format of a path."""
    if isinstance(path, str):
        path = UPath(path)
    if not default:
        # Web addresses default to HTML; everything else to ANSI text
        default = "html" if isinstance(path, HTTPPath) else "ansi"
    if mime := get_mime(path):
        return MIME_FORMATS.get(mime, default)
    return default
6,790 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, NamedTuple
from prompt_toolkit.cache import FastDictCache, SimpleCache
from prompt_toolkit.filters import to_filter
class Converter(NamedTuple):
"""Hold a conversion function and its weight."""
func: Callable
filter_: Filter
weight: int = 1
converters: dict[str, dict[str, list[Converter]]] = {}
The provided code snippet includes necessary dependencies for implementing the `register` function. Write a Python function `def register( from_: Iterable[str] | str, to: str, filter_: FilterOrBool = True, weight: int = 1, ) -> Callable` to solve the following problem:
Add a converter to the centralized format conversion system.
Here is the function:
def register(
    from_: Iterable[str] | str,
    to: str,
    filter_: FilterOrBool = True,
    weight: int = 1,
) -> Callable:
    """Add a converter to the centralized format conversion system.

    Args:
        from_: The format (or formats) the converter accepts as input.
        to: The format the converter produces.
        filter_: A filter determining when the converter is available.
        weight: Preference weight; converters with lower weights are tried first.

    Returns:
        A decorator which registers the decorated conversion function.
    """
    if isinstance(from_, str):
        from_ = (from_,)

    def decorator(func: Callable) -> Callable:
        # Converter is an immutable NamedTuple, so one instance can safely be
        # shared across all of the source formats it is registered under.
        converter = Converter(func=func, filter_=to_filter(filter_), weight=weight)
        for from_format in from_:
            # setdefault replaces the manual "if key not in dict" dance
            converters.setdefault(to, {}).setdefault(from_format, []).append(converter)
        return func

    return decorator
6,791 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.registry import register
from euporie.core.filters import have_modules
_HTML2TEXT_TABLE_RE = r"(?:(?:.*\|)+[^|]*?(?:\n|$))+"
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and its size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `html_to_markdown_py_html2text` function. Write a Python function `async def html_to_markdown_py_html2text( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML to markdown tables using :py:mod:`html2text`.
Here is the function:
async def html_to_markdown_py_html2text(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert HTML to markdown tables using :py:mod:`html2text`."""
    import re

    from html2text import HTML2Text

    raw = datum.data
    markup = raw.decode() if isinstance(raw, bytes) else raw
    converted = HTML2Text(bodywidth=0).handle(markup)

    # Work around an html2text issue with empty first cells in table rows:
    # pad every table row with leading pipes up to the widest row's width.
    pieces: list[str] = []
    pos = 0
    for match in re.finditer(_HTML2TEXT_TABLE_RE, converted, re.MULTILINE):
        # Keep the text between the previous table and this one
        pieces.append(converted[pos : match.start()])
        table_rows = converted[match.start() : match.end()].strip().split("\n")
        widths = [len(row.split("|")) for row in table_rows]
        widest = max(widths)
        padded = "\n".join(
            "|" * (widest - width + 1) + row
            for row, width in zip(table_rows, widths)
        )
        pieces.append(padded + "\n")
        pos = match.end()
    # Append any text remaining after the last table
    pieces.append(converted[pos:])
    return "".join(pieces)
6,792 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.registry import register
from euporie.core.filters import have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and its size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `html_to_markdown_py_mtable` function. Write a Python function `async def html_to_markdown_py_mtable( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML tables to markdown tables using :py:mod:`mtable`.
Here is the function:
async def html_to_markdown_py_mtable(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert HTML tables to markdown tables using :py:mod:`mtable`."""
    from mtable import MarkupTable

    raw = datum.data
    markup = raw.decode() if isinstance(raw, bytes) else raw
    tables = MarkupTable.from_html(markup)
    return "\n\n".join(table.to_md() for table in tables)
6,793 | from __future__ import annotations
import base64
from typing import TYPE_CHECKING
from euporie.core.convert.registry import register
import base64
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
    self,
    to: str,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
    bbox: DiInt | None = None,
) -> Any:
    """Perform conversion asynchronously, caching the result.

    Args:
        to: Name of the target format.
        cols: Target width in terminal columns, if known.
        rows: Target height in terminal rows, if known.
        fg: Foreground colour hint; defaults to the datum's or app's colour.
        bg: Background colour hint; defaults to the datum's or app's colour.
        extend: Passed through to each converter function.
        bbox: Currently unused (cropping is a TODO below).

    Returns:
        The converted data, or an ``ERROR_OUTPUTS`` entry on failure.
    """
    if to == self.format:
        # Nothing to convert; return the data unchanged.
        # TODO - crop
        return self.data
    # Fill in missing colours from the application's palette when available.
    if not fg and hasattr(app := get_app(), "color_palette"):
        fg = self.fg or app.color_palette.fg.base_hex
    if not bg and hasattr(app := get_app(), "color_palette"):
        bg = self.bg or app.color_palette.bg.base_hex
    # Return a previously cached conversion for identical parameters.
    if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
        return self._conversions[key]
    # Candidate chains of intermediate formats from self.format to ``to``.
    routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
    # log.debug(
    #     "Converting from '%s' to '%s' using route: %s", self, to, routes
    # )
    output: T | None = None
    if routes:
        datum = self
        output = None
        for route in routes:
            # Convert pairwise through each stage of the route.
            for stage_a, stage_b in zip(route, route[1:]):
                # NOTE(review): rebinding ``key`` shadows the whole-request
                # key above; each stage result is cached under its own key.
                key = (stage_b, cols, rows, fg, bg, extend)
                if key in self._conversions:
                    output = self._conversions[key]
                else:
                    # Find converter with lowest weight
                    func = sorted(
                        [
                            conv
                            for conv in converters[stage_b][stage_a]
                            if _FILTER_CACHE.get((conv,), conv.filter_)
                        ],
                        key=lambda x: x.weight,
                    )[0].func
                    try:
                        output = await func(datum, cols, rows, fg, bg, extend)
                        self._conversions[key] = output
                    except Exception:
                        log.exception("An error occurred during format conversion")
                        output = None
                if output is None:
                    log.error(
                        "Failed to convert `%s`"
                        " to `%s` using route `%s` at stage `%s`",
                        self,
                        to,
                        route,
                        stage_b,
                    )
                    # Try the next route on error
                    break
                if stage_b != route[-1]:
                    # Wrap the intermediate result so the next stage converts
                    # from it rather than from the original datum.
                    datum = Datum(
                        data=output,
                        format=stage_b,
                        px=self.px,
                        py=self.py,
                        fg=fg,
                        bg=bg,
                        path=self.path,
                        source=datum,
                    )
            else:
                # If this route succeeded (no break), stop trying routes
                break
    # Crop or pad output
    # if bbox and any(bbox):
    if output is None:
        output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
    return output
def _to_sync(self, coro: Coroutine) -> Any:
    """Call an async method synchronously.

    Submits the coroutine to the running event loop from this thread and
    blocks until its result is available.
    """
    future = asyncio.run_coroutine_threadsafe(coro, get_loop())
    return future.result()
def convert(
    self,
    to: str,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> Any:
    """Convert between formats.

    Synchronous wrapper around :py:meth:`convert_async`; see it for the
    parameter semantics.
    """
    return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
    """Get the dimensions of displayable data in pixels.

    Foreground and background color are set at this point if they are
    available, as data conversion outputs are cached and re-used.

    Returns:
        A tuple of the data's pixel width and height; either element may
        be ``None`` if it cannot be determined.
    """
    try:
        return self._pixel_size
    except AttributeError:
        px, py = self.px, self.py
        # Do not bother trying if the format is ANSI
        if self.format != "ansi" and (px is None or py is None):
            # Convert to PNG first when imagesize cannot read the format
            # directly and a conversion route to PNG exists.
            if (
                self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
            ):
                data = await self.convert_async(to="png")
            else:
                data = self.data
            if isinstance(data, str):
                data = data.encode()
            try:
                px_calc, py_calc = imagesize.get(io.BytesIO(data))
            except ValueError:
                pass
            else:
                # Fill in whichever dimension is missing, scaling by the
                # known one to preserve the measured aspect ratio.
                # NOTE(review): the scaled results are floats despite the
                # declared ``int | None`` element type — confirm intended.
                if px is None and px_calc > 0:
                    if py is not None and py_calc > 0:
                        px = px_calc * py / py_calc
                    else:
                        px = px_calc
                if py is None and py_calc > 0:
                    if px is not None and px_calc > 0:
                        py = py_calc * px / px_calc
                    else:
                        py = py_calc
        # Memoize the result on the instance.
        self._pixel_size = (px, py)
        return self._pixel_size
def pixel_size(self) -> Any:
    """Get data dimensions synchronously (wraps :py:meth:`pixel_size_async`)."""
    return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
    """Get the cell width and aspect ratio of the displayable data.

    Returns:
        A tuple of the data's width in terminal columns and its aspect
        ratio, when converted to an image.
    """
    if self._cell_size is None:
        cols, aspect = 0, 0.0
        px, py = await self.pixel_size_async()
        if px is not None and py is not None:
            app = get_app()
            # Use the terminal's reported cell size in pixels when known;
            # otherwise assume 10x20-pixel cells.
            if hasattr(app, "term_info"):
                cell_px, cell_py = app.term_info.cell_size_px
            else:
                cell_px, cell_py = 10, 20
            cols = max(1, int(px // cell_px))
            # Aspect ratio expressed in cell units (rows per column).
            aspect = (py / cell_py) / (px / cell_px)
        self._cell_size = cols, aspect
    return self._cell_size
def cell_size(self) -> Any:
    """Get cell width and aspect synchronously (wraps :py:meth:`cell_size_async`)."""
    return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
    """Store a size for a :py:class:`Datum`.

    Args:
        size: The (columns, rows) display size to register.

    Returns:
        A string key under which the (datum, size) pair is registered.
    """
    # Hold only a weak reference so registration does not keep the datum
    # alive; the finalizer removes stale entries.
    sized_datum = (ref(self), Size(*size))
    key = str(hash(sized_datum))
    self._sizes[key] = sized_datum
    return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
    """Retrieve a :py:class:`Datum` and its size by its key.

    Args:
        key: A key previously returned by :py:meth:`add_size`.

    Returns:
        The (datum, size) pair, or ``None`` if the key is unknown or the
        datum has been garbage collected.
    """
    # NOTE(review): takes ``cls`` — presumably a @classmethod whose
    # decorator was lost in extraction.
    if sized_datum := cls._sizes.get(key):
        datum_ref, size = sized_datum
        if datum := datum_ref():
            return datum, size
    return None
The provided code snippet includes necessary dependencies for implementing the `bytes_to_base64_py` function. Write a Python function `async def bytes_to_base64_py( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert bytes to base64 encoded data.
Here is the function:
async def bytes_to_base64_py(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert bytes to base64 encoded data.

    Args:
        datum: The datum whose ``data`` (str or bytes) is to be encoded.
        cols: Unused; accepted for converter-signature compatibility.
        rows: Unused; accepted for converter-signature compatibility.
        fg: Unused; accepted for converter-signature compatibility.
        bg: Unused; accepted for converter-signature compatibility.
        extend: Unused; accepted for converter-signature compatibility.

    Returns:
        The base64-encoded data as a single-line string.
    """
    raw = datum.data
    # Normalize string input to bytes before encoding.
    if isinstance(raw, str):
        raw = raw.encode()
    encoded = base64.b64encode(raw).decode()
    # Strip any line breaks and surrounding whitespace from the output.
    return encoded.replace("\n", "").strip()
6,794 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess, or a styled
        "Rendering Error" banner if the command could not be run.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to
        # pass to the command (delete=False: removed manually below)
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        # The executable does not exist on PATH.
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # NOTE(review): communicate() does not raise this — defensive only.
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate an output stating there was an error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[n"  # NOTE(review): labelled "reset style" upstream, but
                # ESC[n is not the SGR reset — likely meant b"\x1b[0m"; confirm
            )
            # TODO Log any stderr
        # Clean up any temporary file
        if use_tempfile:
            tfile.close()
            Path(tfile.name).unlink()
    return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
    """Store displayable data in one format and convert it to others.

    Conversion outputs, the pixel size and the cell size are memoized per
    instance; registered display sizes are tracked class-wide in ``_sizes``
    and removed by a :class:`weakref.finalize` callback once the datum is
    garbage collected.
    """

    # Lazily populated caches — set on first access, hence no defaults here.
    _pixel_size: tuple[int | None, int | None]
    _hash: str
    _root: ReferenceType[Datum]
    # Class-wide registry mapping a size key to (weakref-to-datum, Size).
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data.

        Args:
            data: The raw displayable data (text, bytes, PIL image, ...).
            format: Name of the data's format (e.g. ``"png"``, ``"ansi"``).
            px: Known pixel width of the data, if any.
            py: Known pixel height of the data, if any.
            fg: Foreground colour used when converting.
            bg: Background colour used when converting.
            path: Optional filesystem path the data originated from.
            source: The datum this one was converted from, if any.
            align: Horizontal alignment used when displaying the data.
        """
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colours are stored as plain strings (ColorPaletteColor -> str).
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Weak reference to the conversion source; self-referential if root.
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Conversion cache keyed by (to, cols, rows, fg, bg, extend).
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Remove registered sizes for this datum's hash upon collection;
        # atexit=False: skip the cleanup at interpreter shutdown.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a concise representation showing only the data format."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all registered sizes for a given datum hash."""
        # NOTE(review): takes ``cls`` — presumably a @classmethod whose
        # decorator was lost in extraction.
        size_instances = cls._sizes
        # Iterate a list copy so entries can be deleted while iterating.
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop entries whose datum is gone or matches the given hash.
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Release the strong reference promptly.
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes (``b"Error"`` for unsupported types)."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: flatten to plain text first.
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    def get_hash(data: Any) -> str:
        """Calculate a hash of data (SHA-1 hex digest, cache keying only)."""
        # NOTE(review): no ``self``/``cls`` — presumably a @staticmethod
        # whose decorator was lost in extraction.
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    def hash(self) -> str:
        """Return a hash of the `Datum`'s data, computed once and memoized."""
        # NOTE(review): accessed as an attribute elsewhere (``self.hash``,
        # ``datum.hash``) — presumably a @property, decorator lost.
        try:
            return self._hash
        except AttributeError:
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            # Cached weak ref; fall back to self if it has been collected.
            return self._root() or self
        except AttributeError:
            # Walk the source chain until a datum refers to itself (the
            # root) or the chain is broken by garbage collection.
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result.

        Args:
            to: Name of the target format.
            cols: Target width in terminal columns, if known.
            rows: Target height in terminal rows, if known.
            fg: Foreground colour hint; defaults to datum / app colours.
            bg: Background colour hint; defaults to datum / app colours.
            extend: Passed through to each converter function.
            bbox: Currently unused (cropping is a TODO below).

        Returns:
            The converted data, or an ``ERROR_OUTPUTS`` entry on failure.
        """
        if to == self.format:
            # Nothing to convert; return the data unchanged.
            # TODO - crop
            return self.data
        # Fill in missing colours from the app's palette when available.
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        # Return a previously cached conversion for identical parameters.
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        # Candidate chains of intermediate formats to reach ``to``.
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Convert pairwise through each stage of the route.
                for stage_a, stage_b in zip(route, route[1:]):
                    # NOTE(review): rebinding ``key`` shadows the request
                    # key above; stage results are cached per-stage.
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate result so the next stage
                        # converts from it, not the original datum.
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded (no break), stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously via the running event loop."""
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats (sync wrapper for :py:meth:`convert_async`)."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's pixel width and height; either element
            may be ``None`` if it cannot be determined.
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Convert to PNG first when imagesize can't read the format
                # directly and a conversion route to PNG exists.
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Fill in the missing dimension, scaling by the known
                    # one to preserve the measured aspect ratio.
                    # NOTE(review): scaled results are floats despite the
                    # declared ``int | None`` type — confirm intended.
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            # Memoize the result on the instance.
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                # Use the terminal's cell size in pixels when reported,
                # otherwise assume 10x20-pixel cells.
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                # Aspect ratio expressed in cell units (rows per column).
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class:`Datum`; returns the registry key."""
        # Hold only a weak reference so registration does not keep the
        # datum alive; the finalizer removes stale entries.
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key.

        Returns ``None`` if the key is unknown or the datum was collected.
        """
        # NOTE(review): takes ``cls`` — presumably a @classmethod whose
        # decorator was lost in extraction.
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `html_to_ansi_w3m` function. Write a Python function `async def html_to_ansi_w3m( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML text to formatted ANSI using :command:`w3m`.
Here is the function:
async def html_to_ansi_w3m(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert HTML text to formatted ANSI using :command:`w3m`.

    Args:
        datum: The datum whose HTML string data is to be rendered.
        cols: Output width in terminal columns; passed as ``-cols``.
        rows: Unused; accepted for converter-signature compatibility.
        fg: Unused; accepted for converter-signature compatibility.
        bg: Unused; accepted for converter-signature compatibility.
        extend: Unused; accepted for converter-signature compatibility.

    Returns:
        The rendered ANSI text produced by the w3m subprocess.
    """
    # Force w3m to treat stdin as HTML regardless of content sniffing.
    command: list[Any] = ["w3m", "-T", "text/html"]
    if cols is not None:
        command.extend(["-cols", str(cols)])
    rendered = await call_subproc(datum.data.encode(), command)
    return rendered.decode()
6,795 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess, or a styled
        "Rendering Error" banner if the command could not be run.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to
        # pass to the command (delete=False: removed manually below)
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        # The executable does not exist on PATH.
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # NOTE(review): communicate() does not raise this — defensive only.
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate an output stating there was an error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[n"  # NOTE(review): labelled "reset style" upstream, but
                # ESC[n is not the SGR reset — likely meant b"\x1b[0m"; confirm
            )
            # TODO Log any stderr
        # Clean up any temporary file
        if use_tempfile:
            tfile.close()
            Path(tfile.name).unlink()
    return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
    """Store displayable data in one format and convert it to others.

    Conversion outputs, the pixel size and the cell size are memoized per
    instance; registered display sizes are tracked class-wide in ``_sizes``
    and removed by a :class:`weakref.finalize` callback once the datum is
    garbage collected.
    """

    # Lazily populated caches — set on first access, hence no defaults here.
    _pixel_size: tuple[int | None, int | None]
    _hash: str
    _root: ReferenceType[Datum]
    # Class-wide registry mapping a size key to (weakref-to-datum, Size).
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data.

        Args:
            data: The raw displayable data (text, bytes, PIL image, ...).
            format: Name of the data's format (e.g. ``"png"``, ``"ansi"``).
            px: Known pixel width of the data, if any.
            py: Known pixel height of the data, if any.
            fg: Foreground colour used when converting.
            bg: Background colour used when converting.
            path: Optional filesystem path the data originated from.
            source: The datum this one was converted from, if any.
            align: Horizontal alignment used when displaying the data.
        """
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colours are stored as plain strings (ColorPaletteColor -> str).
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Weak reference to the conversion source; self-referential if root.
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Conversion cache keyed by (to, cols, rows, fg, bg, extend).
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Remove registered sizes for this datum's hash upon collection;
        # atexit=False: skip the cleanup at interpreter shutdown.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a concise representation showing only the data format."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all registered sizes for a given datum hash."""
        # NOTE(review): takes ``cls`` — presumably a @classmethod whose
        # decorator was lost in extraction.
        size_instances = cls._sizes
        # Iterate a list copy so entries can be deleted while iterating.
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop entries whose datum is gone or matches the given hash.
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Release the strong reference promptly.
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes (``b"Error"`` for unsupported types)."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: flatten to plain text first.
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    def get_hash(data: Any) -> str:
        """Calculate a hash of data (SHA-1 hex digest, cache keying only)."""
        # NOTE(review): no ``self``/``cls`` — presumably a @staticmethod
        # whose decorator was lost in extraction.
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    def hash(self) -> str:
        """Return a hash of the `Datum`'s data, computed once and memoized."""
        # NOTE(review): accessed as an attribute elsewhere (``self.hash``,
        # ``datum.hash``) — presumably a @property, decorator lost.
        try:
            return self._hash
        except AttributeError:
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            # Cached weak ref; fall back to self if it has been collected.
            return self._root() or self
        except AttributeError:
            # Walk the source chain until a datum refers to itself (the
            # root) or the chain is broken by garbage collection.
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result.

        Args:
            to: Name of the target format.
            cols: Target width in terminal columns, if known.
            rows: Target height in terminal rows, if known.
            fg: Foreground colour hint; defaults to datum / app colours.
            bg: Background colour hint; defaults to datum / app colours.
            extend: Passed through to each converter function.
            bbox: Currently unused (cropping is a TODO below).

        Returns:
            The converted data, or an ``ERROR_OUTPUTS`` entry on failure.
        """
        if to == self.format:
            # Nothing to convert; return the data unchanged.
            # TODO - crop
            return self.data
        # Fill in missing colours from the app's palette when available.
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        # Return a previously cached conversion for identical parameters.
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        # Candidate chains of intermediate formats to reach ``to``.
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Convert pairwise through each stage of the route.
                for stage_a, stage_b in zip(route, route[1:]):
                    # NOTE(review): rebinding ``key`` shadows the request
                    # key above; stage results are cached per-stage.
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate result so the next stage
                        # converts from it, not the original datum.
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded (no break), stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously via the running event loop."""
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats (sync wrapper for :py:meth:`convert_async`)."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's pixel width and height; either element
            may be ``None`` if it cannot be determined.
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Convert to PNG first when imagesize can't read the format
                # directly and a conversion route to PNG exists.
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Fill in the missing dimension, scaling by the known
                    # one to preserve the measured aspect ratio.
                    # NOTE(review): scaled results are floats despite the
                    # declared ``int | None`` type — confirm intended.
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            # Memoize the result on the instance.
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                # Use the terminal's cell size in pixels when reported,
                # otherwise assume 10x20-pixel cells.
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                # Aspect ratio expressed in cell units (rows per column).
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class:`Datum`; returns the registry key."""
        # Hold only a weak reference so registration does not keep the
        # datum alive; the finalizer removes stale entries.
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key.

        Returns ``None`` if the key is unknown or the datum was collected.
        """
        # NOTE(review): takes ``cls`` — presumably a @classmethod whose
        # decorator was lost in extraction.
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `html_to_ansi_elinks` function. Write a Python function `async def html_to_ansi_elinks( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML text to formatted ANSI using :command:`elinks`.
Here is the function:
async def html_to_ansi_elinks(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert HTML text to formatted ANSI using :command:`elinks`.

    Args:
        datum: The datum whose HTML string data is to be rendered.
        cols: Output width in terminal columns; passed as ``-dump-width``.
        rows: Unused; accepted for converter-signature compatibility.
        fg: Unused; accepted for converter-signature compatibility.
        bg: Unused; accepted for converter-signature compatibility.
        extend: Unused; accepted for converter-signature compatibility.

    Returns:
        The rendered ANSI text produced by the elinks subprocess.
    """
    # Dump mode with 24-bit colour (-dump-color-mode 3), no link numbering
    # or reference list, treating input as HTML regardless of sniffing.
    cmd: list[Any] = [
        "elinks",
        "-dump",
        "-dump-color-mode",
        "3",
        "-no-numbering",
        "-force-html",
        "-no-references",
    ]
    if cols is not None:
        cmd.extend(["-dump-width", cols])
    rendered = await call_subproc(datum.data.encode(), cmd)
    return rendered.decode()
6,796 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess, or a styled
        "Rendering Error" banner if the command could not be run.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to
        # pass to the command (delete=False: removed manually below)
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        # The executable does not exist on PATH.
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # NOTE(review): communicate() does not raise this — defensive only.
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate an output stating there was an error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[n"  # NOTE(review): labelled "reset style" upstream, but
                # ESC[n is not the SGR reset — likely meant b"\x1b[0m"; confirm
            )
            # TODO Log any stderr
        # Clean up any temporary file
        if use_tempfile:
            tfile.close()
            Path(tfile.name).unlink()
    return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `html_to_ansi_lynx` function. Write a Python function `async def html_to_ansi_lynx( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML text to formatted ANSI using :command:`lynx`.
Here is the function:
async def html_to_ansi_lynx(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert HTML text to formatted ANSI using :command:`lynx`."""
cmd: list[Any] = ["lynx", "-dump", "-stdin"]
if cols is not None:
cmd += [f"-width={cols}"]
return (await call_subproc(datum.data.encode(), cmd)).decode() | Convert HTML text to formatted ANSI using :command:`lynx`. |
6,797 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
tfile.close()
Path(tfile.name).unlink()
return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `html_to_ansi_links` function. Write a Python function `async def html_to_ansi_links( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML text to formatted ANSI using :command:`links`.
Here is the function:
async def html_to_ansi_links(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert HTML text to formatted ANSI using :command:`links`."""
cmd: list[Any] = ["links", "-dump"]
if cols is not None:
cmd += ["-width", cols]
return (await call_subproc(datum.data.encode(), cmd, use_tempfile=True)).decode() | Convert HTML text to formatted ANSI using :command:`links`. |
6,798 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `html_to_ansi_py_htmlparser` function. Write a Python function `async def html_to_ansi_py_htmlparser( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert HTML tables to ANSI text using :py:mod:`HTMLParser`.
Here is the function:
async def html_to_ansi_py_htmlparser(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert HTML tables to ANSI text using :py:mod:`HTMLParser`."""
import io
import re
from html.parser import HTMLParser
class HTMLStripper(HTMLParser):
"""Very basic HTML parser which strips style and script tags."""
def __init__(self) -> None:
super().__init__()
self.reset()
self.strict = False
self.convert_charrefs = True
self.text = io.StringIO()
self.skip = False
self.skip_tags = ("script", "style")
def handle_starttag(
self, tag: str, attrs: list[tuple[str, str | None]]
) -> None:
if tag in self.skip_tags:
self.skip = True
def handle_endtag(self, tag: str) -> None:
if tag in self.skip_tags:
self.skip = False
def handle_data(self, d: str) -> None:
if not self.skip:
self.text.write(d)
def get_data(self) -> str:
return self.text.getvalue()
stripper = HTMLStripper()
stripper.feed(datum.data)
output = stripper.get_data()
# Strip lines
output = "\n".join([x.strip() for x in output.strip().split("\n")])
# Remove empty paragraphs
return re.sub("\n\n\n+", "\n\n", output) | Convert HTML tables to ANSI text using :py:mod:`HTMLParser`. |
6,799 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `latex_to_ansi_py_flatlatex` function. Write a Python function `async def latex_to_ansi_py_flatlatex( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert LaTeX to ANSI using :py:mod:`flatlatex`.
Here is the function:
async def latex_to_ansi_py_flatlatex(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert LaTeX to ANSI using :py:mod:`flatlatex`."""
import flatlatex
return flatlatex.converter().convert(datum.data.strip().strip("$").strip()) | Convert LaTeX to ANSI using :py:mod:`flatlatex`. |
6,800 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `latex_to_ansi_py_pylatexenc` function. Write a Python function `async def latex_to_ansi_py_pylatexenc( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert LaTeX to ANSI using :py:mod:`pylatexenc`.
Here is the function:
async def latex_to_ansi_py_pylatexenc(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert LaTeX to ANSI using :py:mod:`pylatexenc`."""
from pylatexenc.latex2text import LatexNodes2Text
return LatexNodes2Text().latex_to_text(datum.data.strip().strip("$").strip()) | Convert LaTeX to ANSI using :py:mod:`pylatexenc`. |
6,801 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
log = logging.getLogger(__name__)
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
class stdout_to_log:
"""A decorator which captures standard output and logs it."""
def __init__(
self, log: logging.Logger, output: str = "Literal['stdout','stderr']"
) -> None:
"""Create a new instance of the capturing context manager.
Args:
log: The logger to send the output to
output: Whether to capture the standard output or the standard error
"""
self.log = log
self.out = StringIO()
self.output = output
self._original: TextIO | None = None
def __enter__(self) -> None:
"""Intercept the standard output when entering the context manager."""
if self.output == "stderr":
self._original = sys.stderr
sys.stderr = self.out
else:
self._original = sys.stdout
sys.stdout = self.out
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
exc_traceback: TracebackType | None,
) -> None:
"""Replace the standard output, and logs the captured output."""
assert self._original is not None
if self.output == "stderr":
sys.stderr = self._original
else:
sys.stdout = self._original
if exc_type is not None:
self.out.seek(0)
for line in self.out.readlines():
self.log.debug(str(line).strip())
self.out.close()
The provided code snippet includes necessary dependencies for implementing the `latex_to_ansi_py_sympy` function. Write a Python function `async def latex_to_ansi_py_sympy( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert LaTeX to ANSI using :py:mod:`sympy`.
Here is the function:
async def latex_to_ansi_py_sympy(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert LaTeX to ANSI using :py:mod:`sympy`."""
from sympy import pretty
from sympy.parsing.latex import parse_latex
from sympy.parsing.latex.errors import LaTeXParsingError
from euporie.core.log import stdout_to_log
with stdout_to_log(log):
try:
parsed = parse_latex(datum.data.strip().strip("$").strip())
except LaTeXParsingError:
parsed = datum.data
return pretty(parsed) | Convert LaTeX to ANSI using :py:mod:`sympy`. |
6,802 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
def set_background(image: PilImage, bg: str | None = None) -> PilImage:
"""Remove the alpha channel from an image and set the background colour."""
from PIL import Image
if image.mode in ("RGBA", "LA") or (
image.mode == "P" and "transparency" in image.info
):
alpha = image.convert("RGBA").getchannel("A")
bg_img = Image.new("RGBA", image.size, bg or "#000")
bg_img.paste(image, mask=alpha)
image = bg_img
return image.convert("P", palette=Image.Palette.ADAPTIVE, colors=16).convert(
"RGB", palette=Image.Palette.ADAPTIVE, colors=16
)
from_=("png", "jpeg", "gif"),
to="pil",
filter_=have_modules("PIL"),
)
def get_app() -> BaseApp:
"""Get the current active (running) Application."""
from euporie.core.app import BaseApp
session = _current_app_session.get()
if isinstance(session.app, BaseApp):
return session.app
# Use a baseapp as our "DummyApplication"
return BaseApp()
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `pil_to_ansi_py_timg` function. Write a Python function `async def pil_to_ansi_py_timg( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert a PIL image to ANSI text using :py:mod:`timg`.
Here is the function:
async def pil_to_ansi_py_timg(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert a PIL image to ANSI text using :py:mod:`timg`."""
import timg
data = datum.data
px, py = get_app().term_info.cell_size_px
# Calculate rows based on image aspect ratio
w, h = data.size
if rows is None and cols is not None:
w, h = data.size
rows = ceil(cols / w * h)
elif cols is None and rows is not None:
w, h = data.size
cols = ceil(rows / h * w)
elif rows is None and cols is None:
cols = ceil(w / px)
rows = ceil(h / py)
assert rows is not None
assert cols is not None
# `timg` assumes a 2x1 terminal cell aspect ratio, so we correct for while
# resizing the image
data = data.resize((cols, ceil(rows * 2 * (px / py) / 0.5)))
if bg:
data = set_background(data, bg)
return timg.Ansi24HblockMethod(data).to_string() | Convert a PIL image to ANSI text using :py:mod:`timg`. |
6,803 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data.

Args:
    data: The raw display data (e.g. text, bytes, or a PIL image).
    format: The name of the data's format.
    px: Width of the data in pixels, if known.
    py: Height of the data in pixels, if known.
    fg: Foreground color associated with the data.
    bg: Background color associated with the data.
    path: Optional filesystem path the data originates from.
    source: The datum this one was converted from, if any.
    align: Horizontal alignment to use when displaying the data.
"""
self.data: T = data
self.format = format
self.px, self.py = px, py
# Colors are normalized to plain strings (or None) regardless of input type
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
# Keep only a weak reference to the source so conversion chains do not
# keep every intermediate datum alive; a datum with no source refers to itself
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
# Cache of conversion outputs, keyed by (format, cols, rows, fg, bg, extend)
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
# Remove this datum's size registry entries when it is garbage collected;
# atexit=False skips the cleanup at interpreter shutdown
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
    """Represent this datum as a short debug string."""
    cls_name = type(self).__name__
    return f"{cls_name}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash.

NOTE(review): takes `cls` — presumably a classmethod whose decorator was
lost in this dump; confirm against the original source.
"""
size_instances = cls._sizes
# Iterate over a snapshot, as entries are deleted during iteration
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
# Drop entries whose weakref has died as well as those matching the hash
if not datum or datum.hash == data_hash:
del size_instances[key]
# Release the strong reference obtained from the weakref
del datum
def to_bytes(self) -> bytes:
    """Serialize the stored data to bytes (used e.g. for hashing).

    Strings are UTF-8 encoded, formatted-text lists are flattened to plain
    text first, PIL images contribute their raw pixel bytes, and bytes pass
    through unchanged. Anything else yields the placeholder ``b"Error"``.
    """
    value = self.data
    # The checks below are over disjoint types, so order is immaterial
    if isinstance(value, bytes):
        return value
    if isinstance(value, str):
        return value.encode()
    if isinstance(value, list):
        return to_plain_text(value).encode()
    if isinstance(value, PilImage):
        return value.tobytes()
    return b"Error"
def get_hash(data: Any) -> str:
    """Compute a SHA-1 digest of *data* and return it as a hex string.

    Strings are encoded and PIL images reduced to their raw pixel bytes
    before hashing; anything else is assumed to already be bytes-like.
    """
    if isinstance(data, PilImage):
        payload = data.tobytes()
    elif isinstance(data, str):
        payload = data.encode()
    else:
        payload = data
    return hashlib.sha1(payload).hexdigest()  # noqa S324
def hash(self) -> str:
    """Return a hash of the `Datum`'s data, computing it on first access."""
    if not hasattr(self, "_hash"):
        # Computed once and cached on the instance
        self._hash = self.get_hash(self.to_bytes())
    return self._hash
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs.

Follows the chain of weak `source` references until reaching a datum
that is its own source (or whose reference has died), then caches the
result as a weak reference in `_root`.
"""
try:
# Fast path: the root was already resolved; fall back to self if the
# cached weakref has died
return self._root() or self
except AttributeError:
root = self
while True:
# A datum with no source refers to itself; a dead weakref ends the walk
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result.

Looks up candidate conversion routes (sequences of intermediate
formats), then runs each stage with the lowest-weight applicable
converter, trying the next route if any stage fails.

Args:
    to: The name of the target format.
    cols: The target width in terminal columns.
    rows: The target height in terminal rows.
    fg: Foreground color; defaults to the app palette's foreground.
    bg: Background color; defaults to the app palette's background.
    extend: Whether the output should be extended to the target size.
    bbox: Bounding box for cropping (currently unused — see TODO).

Returns:
    The converted data, or a format-appropriate error placeholder if
    every route failed.
"""
if to == self.format:
# TODO - crop
return self.data
# Fill in default colors from the application's palette when available
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
# Return a cached conversion for identical parameters if one exists
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
# Each route is a sequence of formats; convert pairwise along it
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
# Wrap intermediate results in a new Datum for the next stage
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
    """Run *coro* on the application's event loop and block for the result."""
    future = asyncio.run_coroutine_threadsafe(coro, get_loop())
    result = future.result()
    return result
def convert(
    self,
    to: str,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> Any:
    """Convert between formats, blocking until the conversion completes.

    This is a synchronous wrapper around :py:meth:`convert_async`.
    """
    coro = self.convert_async(to, cols, rows, fg, bg, extend)
    return self._to_sync(coro)
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
# Fast path: the size was already computed and cached
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
# Convert to PNG first so `imagesize` can read the dimensions
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
# Fill in each missing dimension, scaling by the other known
# dimension to preserve the measured aspect ratio
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
    """Blocking wrapper around :py:meth:`pixel_size_async`."""
    coro = self.pixel_size_async()
    return self._to_sync(coro)
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
# Defaults when the pixel size cannot be determined
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
# Fall back to a typical terminal cell size in pixels
cell_px, cell_py = 10, 20
# Width in whole terminal columns, at least one
cols = max(1, int(px // cell_px))
# Aspect ratio expressed as rows-per-column in cell units
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
    """Blocking wrapper around :py:meth:`cell_size_async`."""
    coro = self.cell_size_async()
    return self._to_sync(coro)
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
    """Register a display *size* for this datum and return its lookup key.

    The registry holds only a weak reference to the datum, so registration
    does not extend the datum's lifetime.
    """
    entry = (ref(self), Size(*size))
    key = str(hash(entry))
    self._sizes[key] = entry
    return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
    """Retrieve a :py:class:`Datum` and its size by its key.

    Returns ``None`` when the key is unknown or the datum has since been
    garbage collected.
    """
    entry = cls._sizes.get(key)
    if entry is None:
        return None
    datum_ref, size = entry
    datum = datum_ref()
    if datum is None:
        return None
    return datum, size
The provided code snippet includes the necessary dependencies for implementing the `pil_to_ansi_py_img2unicode` function. Write a Python function `async def pil_to_ansi_py_img2unicode(datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True) -> str` that solves the following problem:
Convert a PIL image to ANSI text using :py:mod:`img2unicode`.
The function is as follows:
async def pil_to_ansi_py_img2unicode(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert a PIL image to ANSI text using :py:mod:`img2unicode`.

    Args:
        datum: The datum containing the PIL image to convert
        cols: The maximum width in terminal columns
        rows: The maximum height in terminal rows
        fg: The foreground color (unused by this converter)
        bg: The background color (unused by this converter)
        extend: Whether to extend output to the target size (unused here)

    Returns:
        The rendered ANSI text.
    """
    import io

    from img2unicode import FastQuadDualOptimizer, Renderer

    buffer = io.StringIO()
    renderer = Renderer(FastQuadDualOptimizer(), max_w=cols, max_h=rows)
    renderer.render_terminal(datum.data, buffer)
    return buffer.getvalue()
6,804 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
# NOTE(review): `\x1b[n` is not the SGR reset sequence — the
# reset is `\x1b[0m`; confirm whether this is intentional
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
# tfile was already closed above, so this close is a no-op;
# the unlink removes the temporary file from disk
tfile.close()
Path(tfile.name).unlink()
return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes the necessary dependencies for implementing the `image_to_ansi_timg` function. Write a Python function `async def image_to_ansi_timg(datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True) -> str` that solves the following problem:
Convert image data to ANSI text using :command:`timg`.
The function is as follows:
async def image_to_ansi_timg(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert image data to ANSI text using :command:`timg`.

    Args:
        datum: The datum containing the image data to convert
        cols: The target width in terminal columns
        rows: The target height in terminal rows
        fg: The foreground color (unused by this converter)
        bg: The background color (unused by this converter)
        extend: Whether to extend output to the target size (unused here)

    Returns:
        The ANSI rendering produced by ``timg``.
    """
    cmd: list[Any] = ["timg"]
    if cols is not None and rows is not None:
        # BUG FIX: timg's -g flag takes WIDTHxHEIGHT; the original passed
        # `cols` for both dimensions, ignoring the requested row count.
        cmd += [f"-g{cols}x{rows}"]
    # Read image data from stdin ("-"), quiet, using all available threads
    cmd += ["--compress", "-pq", "--threads=-1", "-"]
    return (await call_subproc(datum.data, cmd)).decode()
6,805 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
tfile.close()
Path(tfile.name).unlink()
return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes the necessary dependencies for implementing the `image_to_ansi_catimg` function. Write a Python function `async def image_to_ansi_catimg(datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True) -> str` that solves the following problem:
Convert image data to ANSI text using :command:`catimg`.
The function is as follows:
async def image_to_ansi_catimg(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert image data to ANSI text using :command:`catimg`.

    Args:
        datum: The datum containing the image data to convert
        cols: The target width in terminal columns
        rows: The target height in terminal rows (unused by ``catimg``)
        fg: The foreground color (unused by this converter)
        bg: The background color (unused by this converter)
        extend: Whether to extend output to the target size (unused here)

    Returns:
        The ANSI rendering produced by ``catimg``.
    """
    cmd: list[Any] = ["catimg"]
    # Only `cols` is used for sizing, so do not require `rows` as well
    # (the original insisted both be set even though `rows` was unused).
    if cols is not None:
        # -w takes a pixel width; presumably two image pixels map onto each
        # terminal column, hence the factor of 2 — TODO confirm against the
        # catimg documentation.
        cmd += ["-w", cols * 2]
    # Read image data from stdin
    cmd += ["-"]
    return (await call_subproc(datum.data, cmd)).decode()
6,806 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
tfile.close()
Path(tfile.name).unlink()
return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Cached pixel dimensions, set lazily by :py:meth:`pixel_size_async`
    _pixel_size: tuple[int | None, int | None]
    # Cached SHA1 digest of the data, set lazily by :py:meth:`hash`
    _hash: str
    # Weak reference to the original datum of a conversion chain, set by root()
    _root: ReferenceType[Datum]
    # Class-wide registry of sized data: key -> (weak datum ref, size)
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data.

        Args:
            data: The raw displayable data
            format: Identifier for the data's format (e.g. ``"png"``, ``"ansi"``)
            px: Pixel width of the data, if known
            py: Pixel height of the data, if known
            fg: Foreground color to use when converting the data
            bg: Background color to use when converting the data
            path: Optional filesystem path the data originates from
            source: The datum this one was converted from, if any
            align: Horizontal alignment to use when displaying the data
        """
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colors are normalized to plain strings so they can act as cache keys
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Keep only a weak reference to the source so conversion outputs do not
        # keep their parents alive; a datum with no source refers to itself
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Cache of conversion outputs keyed by
        # (format, cols, rows, fg, bg, extend)
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Remove registered sizes for this datum when it is garbage collected.
        # NOTE(review): ``self.hash`` is used here as a value, which suggests
        # ``hash`` is a @property in the original source — confirm.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    # NOTE(review): takes ``cls`` — presumably decorated as a @classmethod in
    # the original source; the decorator is not visible here. Confirm.
    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        # Iterate over a copy so entries can be deleted during iteration
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop entries whose datum is dead or matches the given hash
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Drop the strong reference taken above as soon as possible
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: render them to plain text first
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    # NOTE(review): takes ``data`` rather than ``self`` — presumably a
    # @staticmethod in the original source; decorator not visible here.
    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # SHA1 is used for cache keying only, not for security
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    # NOTE(review): accessed as an attribute (``self.hash``) in __init__ —
    # presumably a @property in the original source; decorator not visible.
    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            # Use the cached weak reference if it is still alive
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references back to the original
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result.

        Args:
            to: The target format
            cols: Available width in terminal columns
            rows: Available height in terminal rows
            fg: Foreground color to use in the conversion
            bg: Background color to use in the conversion
            extend: Whether the output may be padded to the given size
            bbox: Bounding box for cropping (currently unused)

        Returns:
            The converted data, or an error placeholder if conversion failed.
        """
        if to == self.format:
            # TODO - crop
            return self.data
        # Fall back to the application's color palette for missing colors
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        # Return a previously cached conversion if available
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Apply the conversion steps of the route pairwise
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate output so the next stage can
                        # convert from it (and cache against it)
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the application's event loop and block for the result
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width and height in pixels (either may be
            ``None`` if it could not be determined).
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    # Convert to PNG first so imagesize can read the header
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale the detected size to preserve the aspect ratio
                    # when only one dimension was given explicitly
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
        return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                # Use the terminal's reported cell size if available, falling
                # back to a typical 10x20 pixel cell
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                # Aspect ratio expressed in terminal cells (rows per column)
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class`Datum`."""
        sized_datum = (ref(self), Size(*size))
        # The registry key is derived from the (weak ref, size) pair's hash
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    # NOTE(review): takes ``cls`` — presumably a @classmethod in the original
    # source; the decorator is not visible here. Confirm.
    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            # Only return the entry if the datum is still alive
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `image_to_ansi_icat` function. Write a Python function `async def image_to_ansi_icat( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert image data to ANSI text using :command:`icat`.
Here is the function:
async def image_to_ansi_icat(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert image data to ANSI text using :command:`icat`."""
    # Build the icat invocation, constraining the width when a size is given
    args: list[Any] = ["icat"]
    if cols is not None and rows is not None:
        args.extend(("-w", cols))
    # "-" makes icat read the image data from stdin
    args.extend(("--mode", "24bit", "-"))
    result = await call_subproc(datum.data, args)
    return result.decode()
from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    tfile = None
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to
        # pass to the command instead
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # Defensive: create_subprocess_exec/communicate do not raise this
        # themselves, but keep the handler in case callers wrap check logic
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate styled output stating there was a rendering error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[m"  # Reset style (fixed: "\x1b[n" is not an SGR reset)
            )
        # TODO Log any stderr
        # Clean up any temporary file (it was already closed after writing, so
        # no second close is needed)
        if tfile is not None:
            Path(tfile.name).unlink()
    return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Cached pixel dimensions, set lazily by :py:meth:`pixel_size_async`
    _pixel_size: tuple[int | None, int | None]
    # Cached SHA1 digest of the data, set lazily by :py:meth:`hash`
    _hash: str
    # Weak reference to the original datum of a conversion chain, set by root()
    _root: ReferenceType[Datum]
    # Class-wide registry of sized data: key -> (weak datum ref, size)
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data.

        Args:
            data: The raw displayable data
            format: Identifier for the data's format (e.g. ``"png"``, ``"ansi"``)
            px: Pixel width of the data, if known
            py: Pixel height of the data, if known
            fg: Foreground color to use when converting the data
            bg: Background color to use when converting the data
            path: Optional filesystem path the data originates from
            source: The datum this one was converted from, if any
            align: Horizontal alignment to use when displaying the data
        """
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colors are normalized to plain strings so they can act as cache keys
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Keep only a weak reference to the source so conversion outputs do not
        # keep their parents alive; a datum with no source refers to itself
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Cache of conversion outputs keyed by
        # (format, cols, rows, fg, bg, extend)
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Remove registered sizes for this datum when it is garbage collected.
        # NOTE(review): ``self.hash`` is used here as a value, which suggests
        # ``hash`` is a @property in the original source — confirm.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    # NOTE(review): takes ``cls`` — presumably decorated as a @classmethod in
    # the original source; the decorator is not visible here. Confirm.
    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        # Iterate over a copy so entries can be deleted during iteration
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop entries whose datum is dead or matches the given hash
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Drop the strong reference taken above as soon as possible
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: render them to plain text first
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    # NOTE(review): takes ``data`` rather than ``self`` — presumably a
    # @staticmethod in the original source; decorator not visible here.
    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # SHA1 is used for cache keying only, not for security
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    # NOTE(review): accessed as an attribute (``self.hash``) in __init__ —
    # presumably a @property in the original source; decorator not visible.
    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            # Use the cached weak reference if it is still alive
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references back to the original
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result.

        Args:
            to: The target format
            cols: Available width in terminal columns
            rows: Available height in terminal rows
            fg: Foreground color to use in the conversion
            bg: Background color to use in the conversion
            extend: Whether the output may be padded to the given size
            bbox: Bounding box for cropping (currently unused)

        Returns:
            The converted data, or an error placeholder if conversion failed.
        """
        if to == self.format:
            # TODO - crop
            return self.data
        # Fall back to the application's color palette for missing colors
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        # Return a previously cached conversion if available
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Apply the conversion steps of the route pairwise
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate output so the next stage can
                        # convert from it (and cache against it)
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the application's event loop and block for the result
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width and height in pixels (either may be
            ``None`` if it could not be determined).
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    # Convert to PNG first so imagesize can read the header
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale the detected size to preserve the aspect ratio
                    # when only one dimension was given explicitly
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
        return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                # Use the terminal's reported cell size if available, falling
                # back to a typical 10x20 pixel cell
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                # Aspect ratio expressed in terminal cells (rows per column)
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class`Datum`."""
        sized_datum = (ref(self), Size(*size))
        # The registry key is derived from the (weak ref, size) pair's hash
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    # NOTE(review): takes ``cls`` — presumably a @classmethod in the original
    # source; the decorator is not visible here. Confirm.
    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            # Only return the entry if the datum is still alive
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `image_to_ansi_tiv` function. Write a Python function `async def image_to_ansi_tiv( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert image data to ANSI text using :command:`tiv`.
Here is the function:
async def image_to_ansi_tiv(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert image data to ANSI text using :command:`tiv`."""
    # tiv cannot read from stdin, so the data is passed via a temporary file
    args: list[Any] = ["tiv"]
    if cols is not None and rows is not None:
        args.extend(("-w", cols, "-h", rows))
    output = await call_subproc(datum.data, args, use_tempfile=True)
    return output.decode()
from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    tfile = None
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to
        # pass to the command instead
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # Defensive: create_subprocess_exec/communicate do not raise this
        # themselves, but keep the handler in case callers wrap check logic
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate styled output stating there was a rendering error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[m"  # Reset style (fixed: "\x1b[n" is not an SGR reset)
            )
        # TODO Log any stderr
        # Clean up any temporary file (it was already closed after writing, so
        # no second close is needed)
        if tfile is not None:
            Path(tfile.name).unlink()
    return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Cached pixel dimensions, set lazily by :py:meth:`pixel_size_async`
    _pixel_size: tuple[int | None, int | None]
    # Cached SHA1 digest of the data, set lazily by :py:meth:`hash`
    _hash: str
    # Weak reference to the original datum of a conversion chain, set by root()
    _root: ReferenceType[Datum]
    # Class-wide registry of sized data: key -> (weak datum ref, size)
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data.

        Args:
            data: The raw displayable data
            format: Identifier for the data's format (e.g. ``"png"``, ``"ansi"``)
            px: Pixel width of the data, if known
            py: Pixel height of the data, if known
            fg: Foreground color to use when converting the data
            bg: Background color to use when converting the data
            path: Optional filesystem path the data originates from
            source: The datum this one was converted from, if any
            align: Horizontal alignment to use when displaying the data
        """
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colors are normalized to plain strings so they can act as cache keys
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Keep only a weak reference to the source so conversion outputs do not
        # keep their parents alive; a datum with no source refers to itself
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Cache of conversion outputs keyed by
        # (format, cols, rows, fg, bg, extend)
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Remove registered sizes for this datum when it is garbage collected.
        # NOTE(review): ``self.hash`` is used here as a value, which suggests
        # ``hash`` is a @property in the original source — confirm.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    # NOTE(review): takes ``cls`` — presumably decorated as a @classmethod in
    # the original source; the decorator is not visible here. Confirm.
    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        # Iterate over a copy so entries can be deleted during iteration
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop entries whose datum is dead or matches the given hash
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Drop the strong reference taken above as soon as possible
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: render them to plain text first
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    # NOTE(review): takes ``data`` rather than ``self`` — presumably a
    # @staticmethod in the original source; decorator not visible here.
    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # SHA1 is used for cache keying only, not for security
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    # NOTE(review): accessed as an attribute (``self.hash``) in __init__ —
    # presumably a @property in the original source; decorator not visible.
    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            # Use the cached weak reference if it is still alive
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references back to the original
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result.

        Args:
            to: The target format
            cols: Available width in terminal columns
            rows: Available height in terminal rows
            fg: Foreground color to use in the conversion
            bg: Background color to use in the conversion
            extend: Whether the output may be padded to the given size
            bbox: Bounding box for cropping (currently unused)

        Returns:
            The converted data, or an error placeholder if conversion failed.
        """
        if to == self.format:
            # TODO - crop
            return self.data
        # Fall back to the application's color palette for missing colors
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        # Return a previously cached conversion if available
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Apply the conversion steps of the route pairwise
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate output so the next stage can
                        # convert from it (and cache against it)
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the application's event loop and block for the result
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width and height in pixels (either may be
            ``None`` if it could not be determined).
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    # Convert to PNG first so imagesize can read the header
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale the detected size to preserve the aspect ratio
                    # when only one dimension was given explicitly
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
        return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                # Use the terminal's reported cell size if available, falling
                # back to a typical 10x20 pixel cell
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                # Aspect ratio expressed in terminal cells (rows per column)
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class`Datum`."""
        sized_datum = (ref(self), Size(*size))
        # The registry key is derived from the (weak ref, size) pair's hash
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    # NOTE(review): takes ``cls`` — presumably a @classmethod in the original
    # source; the decorator is not visible here. Confirm.
    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            # Only return the entry if the datum is still alive
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `image_to_ansi_viu` function. Write a Python function `async def image_to_ansi_viu( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert image data to ANSI text using :command:`viu`.
Here is the function:
async def image_to_ansi_viu(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert image data to ANSI text using :command:`viu`."""
    # "-b" suppresses the name banner; width is constrained when a size is given
    args: list[Any] = ["viu", "-b"]
    if cols is not None and rows is not None:
        args.extend(("-w", cols))
    # "-" makes viu read the image data from stdin
    args.extend(("-s", "-"))
    result = await call_subproc(datum.data, args)
    return result.decode()
from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    tfile = None
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to
        # pass to the command instead
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # Defensive: create_subprocess_exec/communicate do not raise this
        # themselves, but keep the handler in case callers wrap check logic
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate styled output stating there was a rendering error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[m"  # Reset style (fixed: "\x1b[n" is not an SGR reset)
            )
        # TODO Log any stderr
        # Clean up any temporary file (it was already closed after writing, so
        # no second close is needed)
        if tfile is not None:
            Path(tfile.name).unlink()
    return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `image_to_ansi_jp2a` function. Write a Python function `async def image_to_ansi_jp2a( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert image data to ANSI text using :command:`jp2a`.
Here is the function:
async def image_to_ansi_jp2a(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert image data to ANSI text using :command:`jp2a`."""
cmd: list[Any] = ["jp2a", "--color"]
if cols is not None and rows is not None:
cmd += [f"--height={rows}"]
cmd += ["-"]
return (await call_subproc(datum.data, cmd)).decode() | Convert image data to ANSI text using :command:`jp2a`. |
6,810 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
tfile.close()
Path(tfile.name).unlink()
return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `png_to_ansi_img2txt` function. Write a Python function `async def png_to_ansi_img2txt( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert PNG data to ANSI text using :command:`img2txt`.
Here is the function:
async def png_to_ansi_img2txt(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Convert PNG data to ANSI text using :command:`img2txt`."""
cmd: list[Any] = ["img2txt"]
if cols is not None and rows is not None:
cmd += ["-W", cols, "-H", rows]
return (await call_subproc(datum.data, cmd, use_tempfile=True)).decode() | Convert PNG data to ANSI text using :command:`img2txt`. |
6,811 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
RoundedLine = LineStyle("Rounded", rank=(1, 5), parent=ThinLine)
The provided code snippet includes necessary dependencies for implementing the `png_to_ansi_py_placeholder` function. Write a Python function `async def png_to_ansi_py_placeholder( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Draw placeholder ANSI text.
Here is the function:
async def png_to_ansi_py_placeholder(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str:
"""Draw placeholder ANSI text."""
from euporie.core.border import RoundedLine
if cols is None:
cols = 7
if rows is None:
rows = 3
lines = []
B = RoundedLine.grid
lines.append(f"{B.TOP_LEFT}{B.TOP_MID * max(5, (cols - 2))}{B.TOP_RIGHT}")
lines += [f"{B.MID_LEFT}{B.MID_MID * (cols - 2)}{B.MID_RIGHT}"] * ((rows - 3) // 2)
lines.append(f"{B.MID_LEFT}{'Image'.center(cols - 2)}{B.MID_RIGHT}")
lines += [f"{B.MID_LEFT}{B.MID_MID * (cols - 2)}{B.MID_RIGHT}"] * (
(rows - 3) - (rows - 3) // 2
)
lines.append(f"{B.BOTTOM_LEFT}{B.BOTTOM_MID * max(5, (cols - 2))}{B.BOTTOM_RIGHT}")
return "\n".join(lines) | Draw placeholder ANSI text. |
6,812 | from __future__ import annotations
import logging
from functools import partial
from math import ceil
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import chafa_convert_cmd, chafa_convert_py
from euporie.core.convert.formats.pil import set_background
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
)
The provided code snippet includes necessary dependencies for implementing the `rich_to_ansi_py` function. Write a Python function `async def rich_to_ansi_py( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert rich objects to formatted ANSI text.
Here is the function:
async def rich_to_ansi_py(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Render a rich renderable as ANSI-formatted text.

    Args:
        datum: The datum whose ``data`` attribute holds the rich renderable.
        cols: Maximum output width in terminal columns, if constrained.
        rows: Unused; accepted for converter-signature compatibility.
        fg: Unused; accepted for converter-signature compatibility.
        bg: Unused; accepted for converter-signature compatibility.
        extend: Unused; accepted for converter-signature compatibility.

    Returns:
        A string containing the rendered output with ANSI escape sequences.
    """
    import rich

    console = rich.get_console()
    # Constrain the render width only when a column count was requested
    render_options = (
        console.options if cols is None else console.options.update(max_width=cols)
    )
    segments = console.render(datum.data, render_options)
    return console._render_buffer(segments)
6,813 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from markdown_it import MarkdownIt
from mdit_py_plugins.amsmath import amsmath_plugin
from mdit_py_plugins.dollarmath.index import dollarmath_plugin
from mdit_py_plugins.texmath.index import texmath_plugin
from pygments import highlight
from pygments.formatters import HtmlFormatter
from euporie.core.convert.registry import register
from euporie.core.current import get_app
from euporie.core.lexers import detect_lexer
# Module-level Markdown parser used by the markdown -> HTML converter.
# NOTE(review): this references `MarkdownParser`, but only `MarkdownIt` is
# imported above — presumably a project-local subclass defined elsewhere in
# the file; confirm before relying on it.
# The `highlight` option renders fenced code blocks to inline-styled HTML via
# Pygments, picking the running app's syntax theme when one is available.
markdown_parser = (
(
MarkdownParser(
options_update={
"highlight": lambda text, language, lang_args: highlight(
text,
detect_lexer(text, language=language),
HtmlFormatter(
nowrap=True,
noclasses=True,
style=(
app.syntax_theme
if hasattr((app := get_app()), "syntax_theme")
else "default"
),
),
)
}
)
.enable("linkify")
.enable("table")
.enable("strikethrough")
)
.use(texmath_plugin)
.use(dollarmath_plugin)
.use(amsmath_plugin)
# .use(tasklists_plugin)
)
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `markdown_to_html_markdown_it` function. Write a Python function `async def markdown_to_html_markdown_it( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert markdown to HTML using :py:mod:`markdown_it`.
Here is the function:
async def markdown_to_html_markdown_it(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert markdown to HTML using :py:mod:`markdown_it`.

    Args:
        datum: The datum whose ``data`` attribute holds the markdown source,
            either as ``str`` or UTF-8 ``bytes``.
        cols: Unused; accepted for converter-signature compatibility.
        rows: Unused; accepted for converter-signature compatibility.
        fg: Unused; accepted for converter-signature compatibility.
        bg: Unused; accepted for converter-signature compatibility.
        extend: Unused; accepted for converter-signature compatibility.

    Returns:
        The rendered HTML as a string.
    """
    assert markdown_parser is not None
    source = datum.data
    if isinstance(source, bytes):
        source = source.decode()
    return markdown_parser.render(source)
6,814 | from __future__ import annotations
import asyncio
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import base64_to_bytes_py, imagemagick_convert
from euporie.core.convert.registry import register
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `latex_to_png_dvipng` function. Write a Python function `async def latex_to_png_dvipng( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, timeout: int = 2, ) -> bytes | None` to solve the following problem:
Render LaTeX as a png image using :command:`dvipng`. Borrowed from IPython.
Here is the function:
async def latex_to_png_dvipng(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
    timeout: int = 2,
) -> bytes | None:
    """Render LaTeX as a png image using :command:`dvipng`.

    Borrowed from IPython.

    Args:
        datum: The datum whose ``data`` attribute holds the LaTeX source.
        cols: Unused; accepted for converter-signature compatibility.
        rows: Unused; accepted for converter-signature compatibility.
        fg: Foreground color as a ``#rgb`` or ``#rrggbb`` hex string.
        bg: Unused; accepted for converter-signature compatibility.
        extend: Unused; accepted for converter-signature compatibility.
        timeout: Seconds to wait for each of the ``latex`` and ``dvipng``
            subprocesses before giving up.

    Returns:
        The rendered PNG image data, or ``None`` if rendering failed or
        timed out.
    """
    import shutil
    import subprocess
    import tempfile
    from pathlib import Path

    latex_doc = (
        r"\documentclass{article}\pagestyle{empty}\begin{document}"
        + datum.data
        + r"\end{document}"
    )
    workdir = Path(tempfile.mkdtemp())
    with workdir.joinpath("tmp.tex").open("w", encoding="utf8") as f:
        f.writelines(latex_doc)

    # Expand 3-digit hex colors (#rgb) to 6-digit form (#rrggbb)
    if fg and len(fg) == 4:
        fg = f"#{fg[1]}{fg[1]}{fg[2]}{fg[2]}{fg[3]}{fg[3]}"
    fg_latex = (
        f"RGB {int(fg[1:3], 16)} {int(fg[3:5], 16)} {int(fg[5:7], 16)}" if fg else ""
    )

    proc = None
    try:
        # Convert the latex document to a dvi image
        proc = await asyncio.create_subprocess_exec(
            "latex", "-halt-on-error", "-interaction", "batchmode", "tmp.tex",
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.DEVNULL,
            cwd=workdir,
        )
        await asyncio.wait_for(proc.wait(), timeout)
        # Convert the dvi image to a png on stdout
        dvipng_cmd = [
            "dvipng",
            "-T", "tight",
            "-D", "175",
            "-z", "9",
            "-bg", "Transparent",
            "-o", "/dev/stdout",
            "tmp.dvi",
        ]
        if fg:
            dvipng_cmd += ["-fg", fg_latex]
        proc = await asyncio.create_subprocess_exec(
            *dvipng_cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
            cwd=workdir,
        )
        output, _ = await asyncio.wait_for(proc.communicate(), timeout)
    # Catch asyncio.TimeoutError explicitly: before Python 3.11 it is NOT a
    # subclass of the builtin TimeoutError, so the original clause missed it
    except (subprocess.CalledProcessError, asyncio.TimeoutError, TimeoutError):
        # Do not leave a runaway child process behind on timeout
        if proc is not None and proc.returncode is None:
            try:
                proc.kill()
            except ProcessLookupError:
                pass
        return None
    finally:
        # Clean up temporary folder; ignore_errors so cleanup failures
        # (e.g. files still held open) cannot mask the result
        shutil.rmtree(workdir, ignore_errors=True)
    return output
6,815 | from __future__ import annotations
import asyncio
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import base64_to_bytes_py, imagemagick_convert
from euporie.core.convert.registry import register
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `latex_to_png_py_mpl` function. Write a Python function `async def latex_to_png_py_mpl( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> bytes` to solve the following problem:
Render LaTeX as a PNG image using :py:mod:`matplotlib`. Borrowed from IPython.
Here is the function:
async def latex_to_png_py_mpl(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> bytes:
    """Render LaTeX as a PNG image using :py:mod:`matplotlib`.

    Borrowed from IPython.
    """
    from io import BytesIO

    from matplotlib import figure, font_manager, mathtext
    from matplotlib.backends import backend_agg

    # mpl mathtext doesn't support display math, force inline
    source = datum.data.strip().replace("$$", "$")
    if not source.startswith("$"):
        source = f"${source}$"

    prop = font_manager.FontProperties(size=12)
    parser = mathtext.MathTextParser("path")
    # Measure the rendered expression at 72 dpi to size the figure.
    width, height, depth, _, _ = parser.parse(source, dpi=72, prop=prop)

    fig = figure.Figure(figsize=(width / 72, height / 72))
    # Baseline-align the text using the measured descender depth.
    fig.text(0, depth / height, source, fontproperties=prop, color=fg, usetex=False)
    backend_agg.FigureCanvasAgg(fig)

    out = BytesIO()
    fig.savefig(out, dpi=120, format="png", transparent=True)
    return out.getvalue()
6,816 | from __future__ import annotations
import asyncio
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import base64_to_bytes_py, imagemagick_convert
from euporie.core.convert.registry import register
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Cached pixel dimensions, computed lazily by pixel_size_async().
    _pixel_size: tuple[int | None, int | None]
    # Cached content hash, computed lazily by hash().
    _hash: str
    # Weak reference to the root (original) datum of a conversion chain.
    _root: ReferenceType[Datum]
    # Class-level registry of (weakref-to-datum, size) pairs.
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data."""
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # A datum produced by a conversion keeps a weak link to its source;
        # an original datum points at itself.
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Conversion cache keyed by (format, cols, rows, fg, bg, extend).
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Drop this datum's size-registry entries on garbage collection,
        # but not at interpreter exit.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop dead references as well as entries for the given hash.
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    # NOTE(review): takes bare `data` — presumably a @staticmethod whose
    # decorator was lost in extraction; confirm against the original source.
    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # SHA-1 is used only as a cache key here, not for security.
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    # NOTE(review): referenced without parentheses elsewhere (__init__,
    # _cleanup_datum_sizes) — presumably a @property whose decorator was
    # lost in extraction; confirm against the original source.
    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance.
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references until it terminates
            # at a datum that is its own source, then cache the result.
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result."""
        if to == self.format:
            # TODO - crop
            return self.data
        # Default foreground / background to the app palette when available.
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Run each adjacent pair of formats in the route through a
                # registered converter, re-using cached stage outputs.
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate result so the next stage
                        # converts from it rather than from the original.
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the running event loop and block until complete.
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width and height in pixels (either element
            may be ``None`` when it cannot be determined).
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale a known dimension to preserve the measured aspect
                    # ratio; otherwise take the measured value directly.
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    # Fall back to a typical terminal cell size in pixels.
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class:`Datum`."""
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `pil_to_png_py_pil` function. Write a Python function `async def pil_to_png_py_pil( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> bytes` to solve the following problem:
Convert a Pillow image to PNG bytes using :py:mod:`PIL`.
Here is the function:
async def pil_to_png_py_pil(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> bytes:
    """Convert a Pillow image to PNG bytes using :py:mod:`PIL`.

    The ``cols``/``rows``/``fg``/``bg``/``extend`` parameters are part of the
    common converter signature but are not used by this converter.
    """
    import io

    # Encode the in-memory image via Pillow's PNG writer; the value must be
    # read before the BytesIO buffer is closed by the context manager.
    with io.BytesIO() as output:
        datum.data.save(output, format="PNG")
        return output.getvalue()
6,817 | from __future__ import annotations
import asyncio
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import base64_to_bytes_py, imagemagick_convert
from euporie.core.convert.registry import register
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Cached pixel dimensions, computed lazily by pixel_size_async().
    _pixel_size: tuple[int | None, int | None]
    # Cached content hash, computed lazily by hash().
    _hash: str
    # Weak reference to the root (original) datum of a conversion chain.
    _root: ReferenceType[Datum]
    # Class-level registry of (weakref-to-datum, size) pairs.
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data."""
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # A datum produced by a conversion keeps a weak link to its source;
        # an original datum points at itself.
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Conversion cache keyed by (format, cols, rows, fg, bg, extend).
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Drop this datum's size-registry entries on garbage collection,
        # but not at interpreter exit.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop dead references as well as entries for the given hash.
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    # NOTE(review): takes bare `data` — presumably a @staticmethod whose
    # decorator was lost in extraction; confirm against the original source.
    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # SHA-1 is used only as a cache key here, not for security.
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    # NOTE(review): referenced without parentheses elsewhere (__init__,
    # _cleanup_datum_sizes) — presumably a @property whose decorator was
    # lost in extraction; confirm against the original source.
    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance.
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references until it terminates
            # at a datum that is its own source, then cache the result.
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result."""
        if to == self.format:
            # TODO - crop
            return self.data
        # Default foreground / background to the app palette when available.
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Run each adjacent pair of formats in the route through a
                # registered converter, re-using cached stage outputs.
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate result so the next stage
                        # converts from it rather than from the original.
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the running event loop and block until complete.
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width and height in pixels (either element
            may be ``None`` when it cannot be determined).
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale a known dimension to preserve the measured aspect
                    # ratio; otherwise take the measured value directly.
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    # Fall back to a typical terminal cell size in pixels.
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class:`Datum`."""
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `svg_to_png_py_cairosvg` function. Write a Python function `async def svg_to_png_py_cairosvg( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert SVG to PNG using :py:mod:`cairosvg`.
Here is the function:
async def svg_to_png_py_cairosvg(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> bytes:
    """Convert SVG to PNG using :py:mod:`cairosvg`.

    Returns the rendered PNG file contents as bytes: with ``write_to=None``,
    ``PNGSurface.convert`` returns the output instead of writing it to a file.
    """
    import cairosvg

    data = datum.data
    # Accept SVG markup as either str or bytes.
    markup = data.decode() if isinstance(data, bytes) else data
    return cairosvg.surface.PNGSurface.convert(markup, write_to=None)
6,818 | from __future__ import annotations
from typing import TYPE_CHECKING
from euporie.core.convert.registry import register
from euporie.core.filters import have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Cached pixel dimensions, computed lazily by pixel_size_async().
    _pixel_size: tuple[int | None, int | None]
    # Cached content hash, computed lazily by hash().
    _hash: str
    # Weak reference to the root (original) datum of a conversion chain.
    _root: ReferenceType[Datum]
    # Class-level registry of (weakref-to-datum, size) pairs.
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data."""
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # A datum produced by a conversion keeps a weak link to its source;
        # an original datum points at itself.
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Conversion cache keyed by (format, cols, rows, fg, bg, extend).
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Drop this datum's size-registry entries on garbage collection,
        # but not at interpreter exit.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            # Drop dead references as well as entries for the given hash.
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    # NOTE(review): takes bare `data` — presumably a @staticmethod whose
    # decorator was lost in extraction; confirm against the original source.
    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # SHA-1 is used only as a cache key here, not for security.
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    # NOTE(review): referenced without parentheses elsewhere (__init__,
    # _cleanup_datum_sizes) — presumably a @property whose decorator was
    # lost in extraction; confirm against the original source.
    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance.
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references until it terminates
            # at a datum that is its own source, then cache the result.
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result."""
        if to == self.format:
            # TODO - crop
            return self.data
        # Default foreground / background to the app palette when available.
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Run each adjacent pair of formats in the route through a
                # registered converter, re-using cached stage outputs.
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate result so the next stage
                        # converts from it rather than from the original.
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the running event loop and block until complete.
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width and height in pixels (either element
            may be ``None`` when it cannot be determined).
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale a known dimension to preserve the measured aspect
                    # ratio; otherwise take the measured value directly.
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to an image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    # Fall back to a typical terminal cell size in pixels.
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class:`Datum`."""
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `latex_to_svg_py_ziamath` function. Write a Python function `async def latex_to_svg_py_ziamath( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert LaTeX to SVG using :py:mod:`ziamath`.
Here is the function:
async def latex_to_svg_py_ziamath(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert LaTeX to SVG using :py:mod:`ziamath`."""
    import ziamath as zm

    source = datum.data.strip()
    # Wrap bare expressions as display math so ziamath parses them.
    if not source.startswith("$"):
        source = f"$${source}$$"
    return zm.Text(source, color=fg, size=12).svg()
6,819 | from __future__ import annotations
import logging
from functools import partial
from typing import TYPE_CHECKING
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.formatted_text import to_formatted_text
from euporie.core.convert.registry import register
from euporie.core.ft.ansi import ANSI
from euporie.core.ft.utils import strip_one_trailing_newline
from euporie.core.lexers import detect_lexer
# Bounded cache mapping markup strings to parsed HTML objects (20 entries).
_html_cache: SimpleCache[str, HTML] = SimpleCache(maxsize=20)
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
    """Get the dimensions of displayable data in pixels.

    Foreground and background color are set at this point if they are available, as
    data conversion outputs are cached and re-used.

    Returns:
        A ``(width, height)`` tuple in pixels; either element may be ``None``
        if it could not be determined.
    """
    # EAFP cache: ``_pixel_size`` only exists once this has run to completion
    try:
        return self._pixel_size
    except AttributeError:
        px, py = self.px, self.py
        # Do not bother trying if the format is ANSI
        if self.format != "ansi" and (px is None or py is None):
            # Try using imagesize to get the size of the output
            if (
                self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
            ):
                # Not directly measurable: convert to PNG first if a route exists
                data = await self.convert_async(to="png")
            else:
                data = self.data
            if isinstance(data, str):
                data = data.encode()
            try:
                px_calc, py_calc = imagesize.get(io.BytesIO(data))
            except ValueError:
                # imagesize could not parse the data; leave px/py as given
                pass
            else:
                # Fill in missing dimensions, preserving the aspect ratio when
                # the other dimension was explicitly provided
                if px is None and px_calc > 0:
                    if py is not None and py_calc > 0:
                        px = px_calc * py / py_calc
                    else:
                        px = px_calc
                if py is None and py_calc > 0:
                    if px is not None and px_calc > 0:
                        py = py_calc * px / px_calc
                    else:
                        py = py_calc
        self._pixel_size = (px, py)
        return self._pixel_size
def pixel_size(self) -> Any:
    """Synchronous wrapper around :py:meth:`pixel_size_async`."""
    coro = self.pixel_size_async()
    return self._to_sync(coro)
async def cell_size_async(self) -> tuple[int, float]:
    """Get the cell width and aspect ratio of the displayable data.

    Returns:
        A tuple of the data's width in terminal columns and its aspect ratio, when
        converted to a image.
    """
    if self._cell_size is None:
        cols, aspect = 0, 0.0
        px, py = await self.pixel_size_async()
        # Use truthiness rather than ``is not None`` so a zero-sized image
        # does not cause a ZeroDivisionError in the aspect calculation below;
        # a width or height of 0 is treated like an unknown size.
        if px and py:
            app = get_app()
            if hasattr(app, "term_info"):
                cell_px, cell_py = app.term_info.cell_size_px
            else:
                # Fall back to a typical terminal cell size in pixels
                cell_px, cell_py = 10, 20
            cols = max(1, int(px // cell_px))
            aspect = (py / cell_py) / (px / cell_px)
        self._cell_size = cols, aspect
    return self._cell_size
def cell_size(self) -> Any:
    """Synchronous wrapper around :py:meth:`cell_size_async`."""
    coro = self.cell_size_async()
    return self._to_sync(coro)
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
    """Register *size* against this :py:class:`Datum` and return a lookup key.

    The datum is held via a weak reference so registration does not keep it
    alive.
    """
    entry = (ref(self), Size(*size))
    key = str(hash(entry))
    self._sizes[key] = entry
    return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
    """Retrieve a :py:class:`Datum` and its size by its key.

    Returns:
        The ``(datum, size)`` pair, or ``None`` if the key is unknown or the
        referenced datum has been garbage-collected.
    """
    # NOTE(review): first parameter is named ``cls`` and only class-level
    # state (``cls._sizes``) is used — presumably decorated with
    # ``@classmethod``; confirm against the full file.
    if sized_datum := cls._sizes.get(key):
        datum_ref, size = sized_datum
        # Dereference the weak reference; the datum may have been collected
        if datum := datum_ref():
            return datum, size
    return None
class HTML:
"""A HTML formatted text renderer.
Accepts a HTML string and renders it at a given width.
"""
def __init__(
    self,
    markup: str,
    base: Path | str | None = None,
    width: int | None = None,
    height: int | None = None,
    collapse_root_margin: bool = False,
    fill: bool = True,
    css: CssSelectors | None = None,
    browser_css: CssSelectors | None = None,
    mouse_handler: Callable[[Node, MouseEvent], NotImplementedOrNone] | None = None,
    paste_fixed: bool = True,
    defer_assets: bool = False,
    on_update: Callable[[HTML], None] | None = None,
    on_change: Callable[[HTML], None] | None = None,
    _initial_format: str = "",
) -> None:
    """Initialize the HTML formatter.

    Args:
        markup: The markup text to render
        base: The base url for the HTML dom
        width: The width in characters available for rendering. If :py:const:`None`
            the terminal width will be used
        height: The height in characters available for rendering. If :py:const:`None`
            the terminal height will be used
        collapse_root_margin: If :py:const:`True`, margins of the root element will
            be collapsed
        fill: Whether remaining space in block elements should be filled
        css: Base CSS to apply when rendering the HTML
        browser_css: The browser CSS to use
        mouse_handler: A mouse handler function to use when links are clicked
        paste_fixed: Whether fixed elements should be pasted over the output
        defer_assets: Whether to render the page before remote assets are loaded
        on_update: An optional callback triggered when the DOM updates
        on_change: An optional callback triggered when the DOM changes
        _initial_format: The initial format of the data being displayed
    """
    # Source document and styling configuration
    self.markup = markup.strip()
    self.base = UPath(base or ".")
    self.title = ""
    self.browser_css = browser_css or _BROWSER_CSS
    self.css: CssSelectors = css or {}
    self.defer_assets = defer_assets
    # Rendering geometry and options
    self.render_count = 0  # number of completed renders
    self.width = width
    self.height = height
    self.fill = fill
    self.collapse_root_margin = collapse_root_margin
    self.paste_fixed = paste_fixed
    self._initial_format = _initial_format
    # References to graphical data embedded in the document
    self.graphic_data: set[Datum] = set()
    self.mouse_handler = mouse_handler
    # Rendering outputs: main text, plus floated / fixed-position overlays
    self.formatted_text: StyleAndTextTuples = []
    self.floats: dict[tuple[int, DiBool, DiInt], StyleAndTextTuples] = {}
    self.fixed: dict[tuple[int, DiBool, DiInt], StyleAndTextTuples] = {}
    self.fixed_mask: StyleAndTextTuples = []
    # self.anchors = []
    self.on_update = Event(self, on_update)
    self.on_change = Event(self, on_change)
    # Lazy-loading state for the DOM and its remote assets
    self._dom_processed = False
    self._assets_loaded = False
    self._url_cbs: dict[Path, Callable[[Any], None]] = {}
    self._url_fs_map: dict[Path, AbstractFileSystem] = {}
# Lazily load attributes
def parser(self) -> CustomHTMLParser:
    """Load the HTML parser."""
    # NOTE(review): accessed as an attribute elsewhere in this class
    # (``self.parser.parse``) — presumably decorated with
    # ``@cached_property``; confirm against the full file.
    return CustomHTMLParser(self)
def soup(self) -> Node:
    """Parse the markup into a DOM tree."""
    # NOTE(review): accessed as an attribute elsewhere in this class
    # (``self.soup.descendents``) — presumably decorated with
    # ``@cached_property``; confirm against the full file.
    return self.parser.parse(self.markup)
def process_dom(self) -> None:
    """Load CSS styles and image resources.

    Walks the DOM once, applying inline styles immediately and registering
    per-URL callbacks for remote assets (stylesheets and images), which are
    fetched later by :py:meth:`load_assets`.

    Do not touch element's themes!
    """

    def _process_css(data: bytes) -> None:
        # Callback: parse a fetched stylesheet into this DOM's CSS rules
        try:
            css_str = data.decode()
        except Exception:
            log.warning("Error decoding stylesheet '%s...'", data[:20])
        else:
            parse_style_sheet(css_str, self)

    def _process_img(child: Node, data: bytes) -> None:
        # Callback: attach fetched image bytes to the <img> node
        child.attrs["_data"] = data
        del child.attrs["_missing"]

    for child in self.soup.descendents:
        # Set base
        if child.name == "base":
            if href := child.attrs.get("href"):
                self.base = self.base.joinuri(href)
        # Set title
        elif child.name == "title":
            if contents := child.contents:
                self.title = contents[0]._text.strip()
                self.on_update.fire()
        # In case of a <link> style, load the url
        elif (
            child.name == "link"
            and (
                (attrs := child.attrs).get("rel") == "stylesheet"
                or (attrs.get("rel") == "preload" and attrs.get("as") == "style")
            )
            and (href := attrs.get("href", ""))
        ):
            url = self.base.joinuri(href)
            fs, url = url_to_fs(str(url))
            self._url_fs_map[url] = fs
            self._url_cbs[url] = _process_css
        # In case of a <style> tag, load first child's text
        elif child.name == "style":
            if child.contents:
                # Use unprocessed text attribute to avoid loading the element's theme
                css_str = child.contents[0]._text
                parse_style_sheet(css_str, self)
        # Load images
        elif child.name == "img" and (src := child.attrs.get("src")):
            # Mark as missing until the data callback fires
            child.attrs["_missing"] = "true"
            url = self.base.joinuri(src)
            fs, url = url_to_fs(str(url))
            self._url_fs_map[url] = fs
            self._url_cbs[url] = partial(_process_img, child)
    self._dom_processed = True
async def load_assets(self) -> None:
    """Load remote assets asynchronously.

    URLs are batched by filesystem so each remote filesystem is queried only
    once; the per-URL callbacks registered in :py:meth:`process_dom` are then
    fired with the fetched data.
    """
    self._assets_loaded = True
    # Group URLs by filesystem in a single pass (the previous dict
    # comprehension re-scanned the whole mapping once per filesystem)
    fs_url_map: dict[AbstractFileSystem, list[Path]] = {}
    for url, fs in self._url_fs_map.items():
        fs_url_map.setdefault(fs, []).append(url)
    for fs, urls in fs_url_map.items():
        try:
            results = fs.cat(urls, recursive=False, on_error="return")
        except Exception:
            # Best-effort: a dead filesystem should not abort rendering
            log.warning("Error connecting to %s", fs)
        else:
            for url, result in results.items():
                # ``on_error="return"`` yields exceptions as values
                if not isinstance(result, Exception):
                    log.debug("File %s loaded", url)
                    self._url_cbs[url](result)
                else:
                    log.warning("Error loading %s", url)
    # Reset all nodes so they will update with the new CSS from assets
    if self.defer_assets:
        self.soup.reset()
def render(self, width: int | None, height: int | None) -> StyleAndTextTuples:
    """Render the current markup at a given size."""
    coro = self._render(width, height)
    return asyncio.run_coroutine_threadsafe(coro, get_loop()).result()
async def _render(
    self, width: int | None, height: int | None
) -> StyleAndTextTuples:
    """Render the current markup at a given size, asynchronously.

    Args:
        width: Target width in columns; falls back to the stored width, then
            the terminal width
        height: Target height in rows; falls back to the stored height, then
            the terminal height

    Returns:
        The rendered output as formatted text
    """
    # Fill in any missing dimensions from the terminal size
    no_w = width is None and self.width is None
    no_h = height is None and self.height is None
    if no_w or no_h:
        size = get_app_session().output.get_size()
        if no_w:
            width = size.columns
        if no_h:
            height = size.rows
    if width is not None:
        self.width = width
    if height is not None:
        self.height = height
    assert self.width is not None
    assert self.height is not None
    # The soup gets parsed when we load assets, and asset data gets attached to it
    if not self._dom_processed:
        self.process_dom()
    if not self.defer_assets and not self._assets_loaded:
        await self.load_assets()
    ft = await self.render_element(
        self.soup,
        available_width=self.width,
        available_height=self.height,
        fill=self.fill,
    )
    # Apply "ReverseOverwrite"s
    ft = apply_reverse_overwrites(ft)

    # Apply floats and fixed elements
    def _paste_floats(
        floats: dict[tuple[int, DiBool, DiInt], StyleAndTextTuples],
        lower_ft: StyleAndTextTuples,
    ) -> StyleAndTextTuples:
        """Paste floats on top of rendering, positioned by their anchors."""
        lower_ft_height = None
        for (_, anchors, position), float_ft in sorted(floats.items()):
            row = col = 0
            if anchors.top:
                row = position.top
            elif anchors.bottom:
                # Lazily compute the base height only when first needed
                if lower_ft_height is None:
                    lower_ft_height = sum(1 for _ in split_lines(lower_ft))
                row = (
                    lower_ft_height
                    - sum(1 for _ in split_lines(float_ft))
                    - position.bottom
                )
            if anchors.left:
                col = position.left
            elif anchors.right:
                # Fix: a right-anchored float positions the *column*; this
                # previously assigned ``row``, clobbering the vertical anchor
                # and leaving the float at column 0
                col = (
                    max_line_width(lower_ft)
                    - max_line_width(float_ft)
                    - position.right
                )
            lower_ft = paste(float_ft, lower_ft, row, col)
        return lower_ft

    # Draw floats
    if self.floats:
        ft = _paste_floats(self.floats, ft)
    # Paste fixed elements onto a mask, then onto the rendering if required
    if self.fixed:
        assert self.width is not None
        assert self.height is not None
        fixed_mask = cast(
            "StyleAndTextTuples", [("", (" " * self.width) + "\n")] * self.height
        )
        fixed_mask = _paste_floats(self.fixed, fixed_mask)
        fixed_mask = apply_reverse_overwrites(fixed_mask)
        if self.paste_fixed:
            ft = paste(fixed_mask, ft, 0, 0, transparent=True)
        self.fixed_mask = fixed_mask
    else:
        self.fixed_mask = []
    self.render_count += 1
    self.formatted_text = ft
    # Load assets after initial render and request a re-render when loaded
    if self.defer_assets and not self._assets_loaded:
        loop = asyncio.get_event_loop()
        task = loop.create_task(self.load_assets())
        task.add_done_callback(lambda fut: self.on_change.fire())
    return ft
async def render_element(
    self,
    element: Node,
    available_width: int,
    available_height: int,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render a Node."""
    theme = element.theme
    # Let the element's theme know how much space it has to work with
    theme.update_space(available_width, available_height)
    # Select the most specific content renderer for this element
    if theme.d_table:
        renderer = self.render_table_content
    elif theme.d_list_item:
        renderer = self.render_list_item_content
    elif theme.d_grid:
        renderer = self.render_grid_content
    elif theme.latex:
        renderer = self.render_latex_content
    else:
        # Dispatch by element name, e.g. "render_img_content"
        renderer = getattr(
            self,
            f"render_{element.name.lstrip(':')}_content",
            self.render_node_content,
        )
    # Render the contents, then apply element-level formatting
    rendered = await renderer(element, left, fill, align_content)
    return await self.format_element(rendered, element, left, fill, align_content)
async def render_text_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render a text element.

    Args:
        element: The page element to render
        left: The position on the current line at which to render the output - used
            to indent subsequent lines when rendering inline blocks like images
        fill: Whether to fill the remainder of the rendered space with whitespace
        align_content: Whether to align the element's content

    Returns:
        Formatted text
    """
    # Text fragments inherit their style from the parent element's theme
    parent_theme = element.theme.parent_theme
    style = parent_theme.style if parent_theme else ""
    text = await element.theme.text_transform(element.text)
    return [(style, text)]
async def render_details_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render details, showing summary at the top and hiding contents if closed."""
    theme = element.theme
    # Split children into the first <summary> and everything else
    summary = None
    rest = []
    for node in element.contents:
        if not summary and node.name == "summary":
            summary = node
        else:
            rest.append(node)
    output: StyleAndTextTuples = []
    if summary:
        output += await self.render_element(
            summary,
            available_width=theme.content_width,
            available_height=theme.content_height,
            left=left,
            fill=fill,
        )
    # Only expand the body when the element carries an "open" attribute
    if "open" in element.attrs and rest:
        # Wrap the non-summary children in a dummy node and render it
        wrapper = Node(dom=self, name="::content", parent=element, contents=rest)
        output.append(("", "\n"))
        output += await self.render_element(
            wrapper,
            available_width=theme.content_width,
            available_height=theme.content_height,
            left=left,
            fill=fill,
        )
    return output
async def render_ol_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render lists, adding item numbers to child <li> elements."""
    # Number the items sequentially; an explicit "value" attribute on an
    # item resets the counter from that point onwards
    counter = 0
    for item in element.find_all("li"):
        counter += 1
        counter = int(item.attrs.setdefault("value", str(counter)))
    # Render the list like any other node
    return await self.render_node_content(
        element=element,
        left=left,
        fill=fill,
        align_content=align_content,
    )

render_ul_content = render_ol_content
async def render_list_item_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render a list item."""
    theme = element.theme
    # Work out what the marker (bullet) text should be
    marker_text = theme.list_style_type
    if marker_text == "decimal":
        marker_text = f"{element.attrs['value']}."
    if marker_text:
        # Build a ::marker node containing the bullet text
        marker = Node(dom=self, name="::marker", parent=element)
        marker.contents.append(
            Node(dom=self, name="::text", parent=marker, text=marker_text)
        )
        # "inside" markers flow with the content; otherwise they hang in
        # the margin as the element's marker
        if theme.list_style_position == "inside":
            element.contents.insert(0, marker)
        else:
            element.marker = marker
    return await self.render_node_content(
        element,
        left=left,
        fill=fill,
        align_content=align_content,
    )
async def render_table_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render a HTML table element.

    Args:
        element: The list of parsed elements to render
        left: The position on the current line at which to render the output - used
            to indent subsequent lines when rendering inline blocks like images
        fill: Whether to fill the remainder of the rendered space with whitespace
        align_content: Whether to align the element's content

    Returns:
        Formatted text
    """
    ft = []
    table_theme = element.theme
    table_x_dim = Dimension(
        min=table_theme.min_width,
        preferred=table_theme.content_width if "width" in table_theme else None,
        max=table_theme.max_width or table_theme.content_width,
    )
    table = Table(
        width=table_x_dim,
        expand="width" in table_theme,
        align=table_theme.text_align,
        style=table_theme.style,
        padding=DiInt(0, 0, 0, 0),
        border_line=table_theme.border_line,
        border_style=table_theme.border_style,
        border_visibility=table_theme.border_visibility,
    )
    td_map = {}

    # Stack the elements in the shape of the table
    async def render_rows(elements: list[Node]) -> None:
        for tr in elements:
            if tr.name == "tr":
                tr_theme = tr.theme
                row = table.new_row(
                    align=tr_theme.text_align,
                    style=tr_theme.style,
                    border_line=tr_theme.border_line,
                    border_style=tr_theme.border_style,
                    border_visibility=tr_theme.border_visibility,
                )
                for td in tr.contents:
                    if td.name in ("th", "td"):
                        td_theme = td.theme
                        td_theme.update_space(
                            table_theme.content_width
                            or table_theme.available_width,
                            # Fix: the height argument falls back to the
                            # available *height* (previously this used
                            # ``available_width``)
                            table_theme.content_height
                            or table_theme.available_height,
                        )
                        cell = row.new_cell(
                            text=await self.render_node_content(
                                td,
                                left=0,
                                align_content=False,
                                fill=False,
                            ),
                            padding=td_theme.padding,
                            border_line=td_theme.border_line,
                            border_style=td_theme.border_style,
                            align=td_theme.text_align,
                            colspan=try_eval(td.attrs.get("colspan", 1)),
                            rowspan=try_eval(td.attrs.get("rowspan", 1)),
                            style=td_theme.style + " nounderline",
                            width=td_theme.width if "width" in td_theme else None,
                            border_visibility=td_theme.border_visibility,
                        )
                        # Save for later so we can add the contents once all the
                        # cells are created and we can calculate the cell widths
                        td_map[cell] = td

    # Render the table head
    for child in element.find_all("thead", recursive=False):
        await render_rows(child.contents)
    # Render the table body
    for child in element.find_all("tbody", recursive=False):
        await render_rows(child.contents)
    # Render rows not in a head / body / foot as part of the body
    # (``render_rows`` only processes direct <tr> children, so section
    # elements in ``element.contents`` are skipped here)
    await render_rows(element.contents)
    # Render the table footer
    for child in element.find_all("tfoot", recursive=False):
        await render_rows(child.contents)
    # TODO - process <colgroup> elements

    # Add cell contents, re-rendered at the final calculated column widths
    if td_map:
        col_widths = table.calculate_col_widths()
        for row in table.rows:
            for col_width, cell in zip(col_widths, row.cells):
                if td := td_map.get(cell):
                    cell_padding = compute_padding(cell)
                    available_width = (
                        table_x_dim.max
                        if cell.colspan > 1
                        else col_width - cell_padding.left - cell_padding.right
                    )
                    td.theme.update_space(
                        available_width, table_theme.available_height
                    )
                    cell.text = await self.render_node_content(
                        td,
                        # TODO - get actual colspan cell widths properly
                        left=0,
                    )
    # Render the table
    ft_table = table.render()
    # Render the caption
    # TODO - support "caption-side" css
    captions = element.find_all("caption", recursive=False)
    if captions:
        table_width = max_line_width(ft_table)
        for child in captions:
            ft_caption = await self.render_element(
                child,
                available_width=table_width,
                available_height=table_theme.available_height,
                left=0,
                fill=True,
            )
            if ft_caption:
                ft.extend(ft_caption)
    ft.extend(ft_table)
    return ft
async def render_grid_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render a element with ``display`` set to ``grid``.

    Args:
        element: The list of parsed elements to render
        left: The position on the current line at which to render the output - used
            to indent subsequent lines when rendering inline blocks like images
        fill: Whether to fill the remainder of the rendered space with whitespace
        align_content: Whether to align the element's content

    Returns:
        Formatted text
    """
    theme = element.theme
    theme_style = theme.style
    table = Table(
        style=theme.style,
        padding=DiInt(0, 0, 0, 0),
        width=theme.content_width,
        expand=False,
        background_style=theme_style,
    )
    available_width = theme.content_width
    available_height = theme.content_height
    # TODO - calculate grid row heights
    col_width_template, _row_height_template = theme.grid_template
    grid_areas = theme.grid_areas or {0: {}}
    n_cols = max(len(col_width_template), len(grid_areas[0]))
    td_map = {}
    # Assign items to their positions in the table
    ## Sort children into those assigned a grid area and those unassigned
    child_areas: dict[str, Node] = {}
    remainder = []
    for child in element.contents:  # Sort by CSS `order
        if ga := child.theme.grid_area:
            child_areas[ga] = child
        else:
            remainder.append(child)
    ## Place children assigned a grid area at their positions
    done = set()
    for y, grid_row in grid_areas.items():
        for x, area in grid_row.items():
            if area not in done and area in child_areas:
                child = child_areas.pop(area)
                # Scan right for repeated area names to find the colspan
                colspan = 1
                for i in range(x + 1, len(grid_row)):
                    if grid_row[i] == area:
                        colspan += 1
                    else:
                        break
                # Scan down for repeated area names to find the rowspan
                rowspan = 1
                for i in range(y + 1, len(grid_areas)):
                    if grid_areas[i][x + colspan - 1] == area:
                        rowspan += 1
                    else:
                        break
                # Add a cell child to the table for this child
                cell = Cell(
                    colspan=colspan,
                    rowspan=rowspan,
                    style=child.theme.style,
                    border_line=NoLine,
                    border_style=theme_style,
                )
                table._rows[y].add_cell(cell, index=x)
                td_map[cell] = child
                done.add(area)
    table.sync_rows_to_cols()
    ## Place children without a grid area wherever they fit
    x = y = 0
    for child in sorted((*child_areas.values(), *remainder)):
        child_theme = child.theme
        # Skip whitespace
        if not child_theme.in_flow:
            continue
        colspan = child_theme.grid_column_span
        # Find next available cell
        # Or skip cells to get to the child's assigned column
        # or skip cells until cell fits
        col_start = child_theme.grid_column_start
        while (
            x in table._rows[y]._cells
            or (x > 0 and (x + colspan) > n_cols)
            or (col_start is not None and x != col_start)
        ):
            x += 1
            if x >= n_cols:
                y += 1
                x = 0
        # Add a cell child to the table for this child
        cell = Cell(
            colspan=colspan,
            style=child_theme.style,
            border_line=NoLine,
            border_style=theme_style,
        )
        table._rows[y].add_cell(cell, index=x)
        td_map[cell] = child
    table.sync_rows_to_cols()
    # Remove outer-most cell borders, and set grid gap
    gap_x, gap_y = map(bool, theme.gap)
    border_visibility = DiBool(gap_y, gap_x, gap_y, gap_x)
    for rowcols, directionss in (
        (table._cols.values(), ("top", "bottom")),
        (table._rows.values(), ("left", "right")),
    ):
        for rowcol in rowcols:
            if cells := rowcol._cells:
                # Hide the outward-facing border on the first and last cell
                for func, direction in zip((min, max), directionss):
                    cell = cells[func(cells)]
                    cell.border_visibility = (
                        cell.border_visibility or border_visibility
                    )._replace(**{direction: False})
    # Calculate column widths
    n_cols = len(table._cols)
    available = available_width - gap_x * (n_cols - 1)
    col_widths = {}
    frs = {}
    for x, item in enumerate(col_width_template):
        if item.endswith("fr"):
            frs[x] = css_dimension(item, available=available) or 1
            # TODO
        elif item == "min-content":
            content_widths = []
            for cell in table._cols[x]._cells.values():
                if cell in td_map:
                    content_widths.append(td_map[cell].theme.min_content_width)
            if content_widths:
                col_widths[x] = max(content_widths)
        # elif item == "max-content":
        #     content_widths = []
        #     for cell in table._cols[x]._cells.values():
        #         if cell in td_map:
        #             content_widths.append(td_map[cell].theme.max_content_width)
        #     if content_widths:
        #         col_widths[x] = max(content_widths)
        # elif item.startwith("min-max"):  # TODO
        elif (
            value := css_dimension(item, vertical=False, available=available)
        ) is not None:
            col_widths[x] = round(value)
        else:
            # Give remaining columns one fraction
            frs[x] = 1
    # Any columns not covered by the template default to one fraction
    for x in set(range(n_cols)) - col_widths.keys() - frs.keys():
        frs[x] = 1
    ## Divide remainder between `fr` columns
    if frs:
        fr_available = available - sum(filter(None, col_widths.values()))
        fr_count = sum(frs.values())
        for x, value in frs.items():
            col_widths[x] = round(value / fr_count * fr_available)
    # Add remainder to last column to ensure column widths sum to available space
    if n_cols:
        col_widths[n_cols - 1] += available - sum(col_widths.values())
    # Set cell widths (spanning cells absorb the gaps they cross)
    for row in table._rows.values():
        for x, cell in row._cells.items():
            colspan = cell.colspan
            cell.width = sum(col_widths[x + i] for i in range(colspan)) + gap_x * (
                colspan - 1
            )
    # Allow the table to adjust given cell widths to the available space
    cell_widths = table.calculate_cell_widths(available_width)

    # Render cell contents at the final calculated widths
    async def _render_cell(cell: Cell, td: Node, width: int, height: int) -> None:
        cell.text = await self.render_element(
            td,
            available_width=width,
            available_height=height,
            left=0,
            fill=True,
            align_content=align_content,
        )

    # Render all cells concurrently
    coros = []
    for row in table._rows.values():
        for cell in row._cells.values():
            if td := td_map.get(cell):
                width = cell_widths[cell]
                td.theme.update_space(width, theme.available_height)
                coros.append(
                    _render_cell(
                        cell, td, width or available_width, available_height
                    )
                )
    await asyncio.gather(*coros)
    return table.render()
async def render_latex_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render LaTeX math content."""
    theme = element.theme
    # Render text representation
    ft: StyleAndTextTuples = await self.render_node_content(
        element, left, fill, align_content
    )
    # Render graphic representation
    latex = element.text + "".join(
        child.text for child in element.renderable_descendents
    )
    latex = f"${latex}$"
    # Block-level math is wrapped a second time, yielding "$$...$$"
    if element.theme.d_blocky:
        latex = f"${latex}$"
    datum = Datum(
        latex,
        "latex",
        fg=theme.color or None,
        bg=theme.background_color or None,
        align=WindowAlign.CENTER,
    )
    # Keep reference to this graphic
    self.graphic_data.add(datum)
    # Calculate size and pad text representation
    cols, aspect = await datum.cell_size_async()
    rows = max(len(list(split_lines(ft))), ceil(cols * aspect))
    cols = max(cols, max_line_width(ft))
    key = datum.add_size(Size(rows, cols))
    # Prefix the output with a zero-width fragment flagging the graphic's position
    ft = [(f"[Graphic_{key}]", ""), *ft]
    ft = valign(ft, height=rows, how=FormattedTextVerticalAlign.TOP)
    return ft
async def _render_image(
    self, data: bytes, format_: str, theme: Theme, path: Path | None = None
) -> StyleAndTextTuples:
    # NOTE(review): typed stub with a ``...`` body, immediately followed by a
    # second stub and the real implementation — presumably a
    # ``@typing.overload`` declaration whose decorator was lost; confirm
    # against the full file.
    ...
async def _render_image(
    self, data: str, format_: str, theme: Theme, path: Path | None = None
) -> StyleAndTextTuples:
    # NOTE(review): typed stub with a ``...`` body — presumably a
    # ``@typing.overload`` declaration whose decorator was lost; confirm
    # against the full file.
    ...
async def _render_image(
    self,
    data: bytes | str,
    format_: str,
    theme: Theme,
    path: Path | None = None,
) -> StyleAndTextTuples:
    """Render an image and prepare graphic representation."""
    datum = Datum(
        data,
        format_,
        path=path,
    )
    # Keep reference to this graphic
    self.graphic_data.add(datum)
    # Scale down the image to fit to width
    cols, aspect = await datum.cell_size_async()
    if content_width := theme.content_width:
        cols = content_width if cols == 0 else min(content_width, cols)
    rows = ceil(cols * aspect)
    # Convert the image to formatted-text
    ft = (
        await datum.convert_async(
            to="ft",
            cols=cols,
            rows=rows or None,
            fg=theme.color,
            bg=theme.background_color,
        )
        or []
    )
    # Remove trailing new-lines
    ft = strip(ft, chars="\n", left=False)
    # If we couldn't calculate the image size, use the size of the text output
    if rows == 0 and cols:
        rows = min(theme.content_height, len(list(split_lines(ft))))
        aspect = rows / cols
    # Store reference to image element
    key = Datum.add_size(datum, Size(rows, cols))
    return cast(
        "StyleAndTextTuples",
        # Flag graphic position
        [(f"[Graphic_{key}]", "")]
        # Set default background color on generated content
        + [(f"{theme.style} {style}", (text), *rest) for style, text, *rest in ft],
    )
async def render_img_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render an image's content."""
    theme = element.theme
    attrs = element.attrs
    path = self.base.joinuri(str(attrs.get("src", "")))
    data = attrs.get("_data")
    if data and not attrs.get("_missing"):
        # Image data is available: display it graphically
        return await self._render_image(
            data, get_format(path, default="png"), theme, path
        )
    # Otherwise fall back to a placeholder icon, plus a label when room allows
    style = f"class:image,placeholder {theme.style}"
    ft: StyleAndTextTuples = [(style, "🌄")]
    width = theme.content_width
    if width and width >= 7:
        label = element.attrs.get("alt") or (path.name if path else "Image")
        ft.append((style, " "))
        ft.append((style, label))
    return ft
async def render_svg_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Display images rendered as ANSI art."""
    theme = element.theme
    # HTMLParser clobbers the case of element attributes, so restore the SVG
    # namespaces explicitly
    element.attrs["xmlns"] = "http://www.w3.org/2000/svg"
    element.attrs["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
    # Restore the case-sensitive viewBox attribute, and substitute the
    # theme's foreground color for "currentColor"
    markup = element._outer_html().replace(" viewbox=", " viewBox=")
    markup = markup.replace("currentColor", theme.color)
    return await self._render_image(markup, "svg", theme)
async def render_input_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Render an input element."""
    attrs = element.attrs
    # Show the value if set, falling back to the placeholder, then a space
    display_text = attrs.get("value", attrs.get("placeholder", " ")) or " "
    text_node = Node(dom=self, name="::text", parent=element, text=display_text)
    element.contents.insert(0, text_node)
    return await self.render_node_content(
        element,
        left=left,
        fill=fill,
        align_content=align_content,
    )
async def render_node_content(
    self,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Generate flows for the contents of the element.

    Lays out the element's renderable descendents, handling inline flow,
    inline-block tokens, block elements, ``<br>`` line-breaks and left /
    right floated elements.

    Args:
        element: The element whose children are to be rendered
        left: The position on the current line at which rendering starts
        fill: Whether rendered blocks should be padded to the content width
        align_content: Whether to align content within each child

    Returns:
        The rendered formatted-text fragments
    """
    ft: StyleAndTextTuples = []
    ft_left: StyleAndTextTuples
    ft_middle: StyleAndTextTuples
    ft_right: StyleAndTextTuples
    empty: StyleAndTextTuples = []
    line_height = 1
    baseline = 0
    parent_theme = element.theme
    d_blocky = d_inline = d_inline_block = False
    float_lines_left: list[StyleAndTextTuples] = []
    float_width_left = 0
    float_lines_right: list[StyleAndTextTuples] = []
    float_width_right = 0
    content_width = parent_theme.content_width
    new_line: StyleAndTextTuples = []

    def flush() -> None:
        """Add the current line to the rendered output."""
        nonlocal new_line, ft, left, line_height, baseline
        if new_line:
            # Pad the new-line to form an alignable block
            new_line = pad(new_line, style=parent_theme.style)
            if ft:
                # Combine with the output
                ft = join_lines([ft, new_line]) if ft else new_line
            else:
                ft = new_line
        # Reset the new line
        left = 0
        line_height = 1
        baseline = 0
        new_line = []

    available_width = parent_theme.content_width
    available_height = parent_theme.content_height

    # Kick off rendering of every visible descendent concurrently
    coros = {}
    for child in element.renderable_descendents:
        theme = child.theme
        if theme.skip:
            continue
        # Render the element
        coros[child] = self.render_element(
            child,
            available_width=available_width,
            available_height=available_height,
            left=0,
            fill=fill,
            align_content=align_content,
        )
    renderings = await asyncio.gather(*coros.values())

    # Render each child node
    for child, rendering in zip(coros, renderings):
        # Start a new line if we encounter a <br> element
        if child.name == "br":
            flush()
            continue
        # If the rendering was empty, move on
        if not rendering:
            continue
        theme = child.theme
        # We will start a new line if the previous item was a block
        if ft and d_blocky and last_char(ft) != "\n":
            line_height = 1
            left = 0
            baseline = 0
        d_blocky = theme.d_blocky
        d_inline = theme.d_inline
        d_inline_block = theme.d_inline_block
        preformatted = theme.preformatted

        # If the rendering was positioned absolutely or fixed, store it and
        # draw it later (absolute / relative positioning is still TODO)
        if theme.theme["position"] == "fixed":
            self.fixed[(theme.z_index, theme.anchors, theme.position)] = rendering

        elif theme.floated == "right":
            # Stack the new right-float to the left of any existing right floats
            lines = []
            for ft_left, ft_right in zip_longest(
                split_lines(pad(rendering, style=theme.style)),
                float_lines_right,
                fillvalue=empty,
            ):
                lines.append([*ft_left, *ft_right])
            float_lines_right = lines
            float_width_right = fragment_list_width(float_lines_right[0])
            continue

        elif theme.floated == "left":
            # Stack the new left-float to the right of any existing left floats.
            # FIX: previously this zipped against the freshly created empty
            # ``lines`` list instead of ``float_lines_left``, silently
            # discarding every active left float; it now mirrors the
            # right-float branch above.
            lines = []
            for ft_left, ft_right in zip_longest(
                float_lines_left,
                split_lines(pad(rendering, style=theme.style)),
                fillvalue=empty,
            ):
                lines.append([*ft_left, *ft_right])
            float_lines_left = lines
            float_width_left = fragment_list_width(float_lines_left[0])
            continue

        # If the rendering was inline, add it to the end of the last line of
        # the current output. This might involve re-aligning the last line in
        # the output, which could have been an inline-block
        elif d_inline and (
            parent_theme.d_inline or preformatted
        ):
            new_line.extend(rendering)

        elif d_inline or d_inline_block:
            # Inline content is wrapped word-by-word; inline-blocks wrap as a
            # single token
            if d_inline:
                tokens = list(fragment_list_to_words(rendering))
            else:
                tokens = [rendering]
            for token in tokens:
                token_lines = list(split_lines(token))
                token_width = max(fragment_list_width(line) for line in token_lines)
                token_height = len(token_lines)
                # Deal with floats
                float_width_right = (
                    fragment_list_width(float_lines_right[0])
                    if float_lines_right
                    else 0
                )
                float_width_left = (
                    fragment_list_width(float_lines_left[0])
                    if float_lines_left
                    else 0
                )
                # If the token does not fit on the current line, flush the
                # line, flanked by one row from each active float
                if (
                    new_line
                    and (content_width - float_width_left - float_width_right)
                    - left
                    - token_width
                    < 0
                ):
                    new_rows = list(split_lines(new_line))
                    new_line_width = max(
                        fragment_list_width(line) for line in new_rows
                    )
                    transformed_rows = []
                    for ft_left, ft_middle, ft_right in zip_longest(
                        float_lines_left[:line_height],
                        (
                            pad(
                                row,
                                char=" ",
                                style=parent_theme.style,
                                width=new_line_width,
                            )
                            for row in new_rows
                        ),
                        float_lines_right[:line_height],
                        fillvalue=empty,
                    ):
                        # Width remaining between the floats on this row
                        line_width = (
                            content_width
                            - fragment_list_width(ft_left)
                            - fragment_list_width(ft_right)
                        )
                        transformed_rows.append(
                            [
                                *ft_left,
                                *align(
                                    ft_middle,
                                    how=parent_theme.text_align,
                                    width=line_width,
                                    style=parent_theme.style,
                                    placeholder="",
                                ),
                                *ft_right,
                            ]
                        )
                    # Consume the float rows we just drew
                    float_lines_left = float_lines_left[line_height:]
                    float_lines_right = float_lines_right[line_height:]
                    # Manually flush the transformed lines
                    if ft:
                        ft = join_lines([ft, *transformed_rows])
                    else:
                        ft = join_lines(transformed_rows)
                    baseline = 0
                    new_rows = [[]]
                    left = 0
                    line_height = 1
                    new_line = []
                # Append the token to the current line
                if line_height == token_height == 1 or not new_line:
                    new_line.extend(token)
                    new_rows = [new_line]
                    baseline = int(theme.vertical_align * (token_height - 1))
                    line_height = max(line_height, token_height)
                else:
                    # Multi-row concatenation, aligning baselines
                    new_line, baseline = concat(
                        ft_a=new_line,
                        ft_b=token,
                        baseline_a=baseline,
                        baseline_b=int(theme.vertical_align * (token_height - 1)),
                        style=parent_theme.style,
                    )
                    new_rows = list(split_lines(new_line))
                    line_height = len(new_rows)
                left += token_width

        # Otherwise we are rendering a block-like element, which gets added to
        # the end of the output
        else:
            # Flush the latest line
            flush()
            # Start block elements on a new line
            if ft and d_blocky and last_char(ft) != "\n":
                ft.append(("", "\n"))
            ft.extend(rendering)
            line_height = 1
            left = 0

    # On "clear", draw the rest of the floats
    if float_lines_left or float_lines_right:
        for ft_left, ft_middle, ft_right in zip_longest(
            float_lines_left,
            split_lines(new_line[:]),
            float_lines_right,
            fillvalue=empty,
        ):
            float_width_right = (
                fragment_list_width(float_lines_right[0])
                if float_lines_right
                else 0
            )
            float_width_left = (
                fragment_list_width(float_lines_left[0]) if float_lines_left else 0
            )
            line_width = (
                content_width
                - fragment_list_width(ft_left)
                - fragment_list_width(ft_right)
            )
            row = [
                *ft_left,
                *align(
                    ft_middle,
                    how=parent_theme.text_align,
                    width=line_width,
                    style=parent_theme.style + " nounderline",
                    placeholder="",
                ),
                *ft_right,
            ]
            ft = join_lines([ft, row]) if ft else row
        new_line = []

    # Flush any current lines
    flush()
    return ft
async def format_element(
    self,
    ft: StyleAndTextTuples,
    element: Node,
    left: int = 0,
    fill: bool = True,
    align_content: bool = True,
) -> StyleAndTextTuples:
    """Format an element's content based on its theme.

    Applies, in order: horizontal overflow truncation, vertical truncation /
    height padding, text alignment, width padding, borders & padding, list
    markers, margins (including "auto" centering) and link mouse handlers.

    Args:
        ft: The element's rendered content
        element: The element whose theme drives the formatting
        left: The starting offset on the current line (unused here)
        fill: Whether block content should be padded to the content width
        align_content: Whether to apply the theme's text alignment

    Returns:
        The formatted fragments
    """
    theme = element.theme
    parent_theme = theme.parent_theme
    # Cache the display-type flags used repeatedly below
    d_blocky = theme.d_blocky
    d_inline = theme.d_inline
    d_inline_block = theme.d_inline_block
    preformatted = theme.preformatted
    content_width = theme.content_width
    content_height = theme.content_height

    # If an element should not overflow it's width / height, truncate it
    if not d_inline and not preformatted:
        if theme.get("overflow_x") == "hidden":
            ft = truncate(ft, content_width, placeholder="", ignore_whitespace=True)
        elif theme.get("overflow_x") == "auto":
            # Show an indicator character where content was cut off
            ft = truncate(
                ft,
                content_width,
                placeholder="▹",
                ignore_whitespace=True,
                style=theme.style,
            )
        else:
            ft = truncate(
                ft,
                content_width,
                placeholder="",
                ignore_whitespace=True,
                style=theme.style,
            )

    # Truncate or expand the height
    overflow_y = theme.get("overflow_y") in {"hidden", "auto"}
    pad_height = d_blocky and theme.height is not None
    if overflow_y or pad_height:
        # Work out the target height; later checks override earlier ones
        target_height = None
        if (min_height := theme.min_height) and min_height > content_height:
            target_height = min_height
        if (max_height := theme.max_height) and max_height < content_height:
            target_height = max_height
        elif height := theme.height:
            target_height = height
        if target_height is not None:
            # Truncate elements with hidden overflows
            if overflow_y:
                lines = []
                for i, line in enumerate(split_lines(ft)):
                    # NOTE(review): `i <= target_height` keeps
                    # target_height + 1 lines - looks like an off-by-one;
                    # confirm against upstream before changing
                    if i <= target_height:
                        lines.append(line)
            else:
                lines = list(split_lines(ft))
            # Pad height of block elements to theme height
            if pad_height and len(lines) < target_height:
                lines.extend([[]] * (target_height - len(lines)))
            ft = join_lines(lines)

    # Align content
    if align_content and d_blocky:
        alignment = theme.text_align
        if alignment != FormattedTextAlign.LEFT:
            ft = align(
                ft,
                alignment,
                width=None if d_inline_block else content_width,
                style=theme.style,
                ignore_whitespace=True,
                placeholder="",
            )

    # Fill space around block elements so they fill the content width
    if ft and ((fill and d_blocky and not theme.d_table) or d_inline_block):
        pad_width = None
        if d_blocky:
            pad_width = content_width
        elif d_inline_block:
            # Inline-blocks without an explicit width shrink to fit
            pad_width = max_line_width(ft) if theme.width is None else content_width
        if pad_width is not None:
            style = theme.style
            ft = pad(
                ft,
                width=round(pad_width),
                char=" ",
                style=style,
            )

    # Use the rendered content width from now on for inline elements
    if d_inline_block or d_inline:
        content_width = max_line_width(ft)

    # Add padding & border
    if d_blocky or d_inline_block:
        padding = theme.padding
        border_visibility = theme.border_visibility
        # Collapsed table borders are drawn by the table itself
        if (any(padding) or any(border_visibility)) and not (
            theme.d_table and theme.border_collapse
        ):
            ft = add_border(
                ft,
                style=theme.style,
                border_grid=theme.border_grid,
                width=content_width if not ft else None,
                border_visibility=border_visibility,
                border_style=theme.border_style,
                padding=padding,
            )
    # Draw borders and padding on text inside inline elements
    elif element.name == "::text":
        padding = theme.padding
        border_visibility = theme.border_visibility
        if (
            padding.left
            or padding.right
            or border_visibility.left
            or border_visibility.right
        ):
            # Only draw left / right edges at the start / end of the inline run
            if not element.is_first_child_node:
                border_visibility = border_visibility._replace(left=False)
                padding = padding._replace(left=0)
            if not element.is_last_child_node:
                border_visibility = border_visibility._replace(right=False)
                padding = padding._replace(right=0)
            if any(padding) or any(border_visibility):
                ft = add_border(
                    ft,
                    style=theme.style,
                    border_grid=theme.border_grid,
                    width=content_width if not ft else None,
                    border_visibility=border_visibility,
                    border_style=theme.border_style,
                    padding=padding,
                )

    # The "::marker" element is drawn in the margin, before any padding.
    # If the element has no margin, it can end up in the parent's padding.
    # We use [ReverseOverwrite] fragments to ensure the marker is ignored
    # now and written over the margin later.
    if element.marker is not None:
        marker_ft = await self.render_element(
            element.marker,
            available_width=99999,
            available_height=theme.available_height,
            left=0,
            fill=False,
            align_content=False,
        )
        ft = [
            *apply_style(marker_ft, "[ReverseOverwrite]"),
            *ft,
        ]

    parent_style = parent_theme.style if parent_theme else ""
    # Render the margin
    if (alignment := theme.block_align) != FormattedTextAlign.LEFT:
        # Center block contents if margin_left and margin_right are "auto"
        ft = align(
            ft,
            how=alignment,
            width=theme.available_width,
            style=parent_style,
            placeholder="",
        )
    elif any(margin := theme.margin):
        # Draw margins as invisible border padding in the parent's style
        ft = add_border(
            ft=ft,
            style=theme.style,
            border_visibility=DiBool.from_value(False),
            padding=margin,
            padding_style=parent_style,
        )

    # Apply mouse handler to links
    if (
        (parent := element.parent)
        and parent.name == "a"
        and callable(handler := self.mouse_handler)
        and (href := parent.attrs.get("href"))
    ):
        element.attrs["_link_path"] = self.base.joinuri(href)
        element.attrs["title"] = parent.attrs.get("title")
        # Attach the handler only to fragments that do not already carry one
        ft = cast(
            "StyleAndTextTuples",
            [
                (style, text, *(rest or [partial(handler, element)]))
                for style, text, *rest in ft
            ],
        )
    return ft
def __pt_formatted_text__(self) -> StyleAndTextTuples:
    """Return the document as formatted text.

    Performs an unsized render pass first if no output has been produced yet.
    """
    if not self.formatted_text:
        # Render at the natural size to populate ``formatted_text``
        self.render(width=None, height=None)
    return self.formatted_text
The provided code snippet includes necessary dependencies for implementing the `html_to_ft` function. Write a Python function `async def html_to_ft( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> StyleAndTextTuples` to solve the following problem:
Convert HTML to formatted text.
Here is the function:
async def html_to_ft(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> StyleAndTextTuples:
    """Convert HTML to formatted text.

    Args:
        datum: The datum containing the HTML markup to render
        cols: The width in terminal columns at which to render the HTML
        rows: The height in terminal rows at which to render the HTML
        fg: The foreground color (unused; accepted for converter-API parity)
        bg: The background color (unused; accepted for converter-API parity)
        extend: Whether the output should be padded to fill the width

    Returns:
        The rendered formatted-text fragments
    """
    from euporie.core.ft.html import HTML

    data = datum.data
    markup = data.decode() if isinstance(data, bytes) else data
    # Re-use a cached parsed document for repeated conversions of this datum
    html = _html_cache.get(
        datum.hash,
        partial(
            HTML,
            markup,
            width=cols,
            base=datum.path,
            collapse_root_margin=True,
            fill=extend,
            _initial_format=datum.root.format,
        ),
    )
    # FIX: removed dataset-extraction residue that was fused onto this line
    return await html._render(cols, rows)
from __future__ import annotations
import logging
from functools import partial
from typing import TYPE_CHECKING
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.formatted_text import to_formatted_text
from euporie.core.convert.registry import register
from euporie.core.ft.ansi import ANSI
from euporie.core.ft.utils import strip_one_trailing_newline
from euporie.core.lexers import detect_lexer
# Module-level logger, named after this module per the logging convention
log = logging.getLogger(__name__)

# Lexer names permitted for automatic syntax highlighting of plain-text
# output; checked against ``lexer.name`` in ``ansi_to_ft``
_WHITELISTED_LEXERS = {
    "python",
    "markdown",
    "javascript",
    "json",
}
class ANSI(PTANSI):
    """Convert ANSI text into formatted text, preserving all control sequences."""

    def __init__(self, value: str, tab_size: int = 8) -> None:
        """Initiate the ANSI processor instance.

        This replaces carriage returns to emulate terminal output.

        Args:
            value: The ANSI string to process.
            tab_size: The number of spaces to use to represent a tab
        """
        # Replace tabs with spaces
        value = value.expandtabs(tabsize=tab_size)
        # Replace windows style newlines
        value = value.replace("\r\n", "\n")
        # Remove anything before a carriage return if there is something after
        # it to emulate a carriage return in the output
        value = re.sub(r"^.*\r(?!\n)", "", value, count=0, flags=re.MULTILINE)
        # Clear line by deleting previous characters
        value = re.sub(r".*\x1b\[2K", "", value, count=0)
        # Remove hide & show cursor commands
        value = re.sub(r"\x1b\[\?25[hl]", "", value, count=0)
        super().__init__(value)

    def _parse_corot(self) -> Generator[None, str, None]:
        """Coroutine that parses the ANSI escape sequences.

        This is modified version of the ANSI parser from prompt_toolkit retains
        all CSI escape sequences.

        Yields:
            Accepts characters from a string.
        """
        style = ""
        formatted_text = self._formatted_text

        while True:
            char = yield
            sequence = char

            # Everything between \001 and \002 should become a ZeroWidthEscape.
            if char == "\001":
                sequence = ""
                while char != "\002":
                    char = yield
                    if char == "\002":
                        formatted_text.append(("[ZeroWidthEscape]", sequence))
                        break
                    else:
                        sequence += char
                continue

            # Check for backspace
            elif char == "\x08":
                # TODO - remove last character from last non-ZeroWidthEscape fragment
                if formatted_text:
                    formatted_text.pop()
                continue

            elif char in ("\x1b", "\x9b"):
                # Got a CSI sequence, try to compile a control sequence
                char = yield

                # Check for sixels
                if char == "P":
                    # Got as DEC code
                    sequence += char
                    # We expect "p1;p2;p3;q" + sixel data + "\x1b\"
                    char = yield
                    while char != "\x1b":
                        sequence += char
                        char = yield
                    sequence += char
                    char = yield
                    # 0x5C is "\" - the string terminator's final byte
                    if ord(char) == 0x5C:
                        sequence += char
                    # Sixel payloads occupy no cells in the fragment stream
                    formatted_text.append(("[ZeroWidthEscape]", sequence))
                    continue

                # Check for hyperlinks (OSC 8 ; params ; URI ST)
                elif char == "]":
                    sequence += char
                    char = yield
                    if char == "8":
                        sequence += char
                        char = yield
                        if char == ";":
                            sequence += char
                            char = yield
                            # Accumulate until the "\x1b\\" string terminator
                            while True:
                                sequence += char
                                if sequence[-2:] == "\x1b\\":
                                    break
                                char = yield
                            formatted_text.append(("[ZeroWidthEscape]", sequence))
                            continue

                elif (char == "[" and sequence == "\x1b") or sequence == "\x9b":
                    if sequence == "\x1b":
                        sequence += char
                        char = yield
                    # Next are any number (including none) of "parameter bytes"
                    params = []
                    current = ""
                    while 0x30 <= ord(char) <= 0x3F:
                        # Parse list of integer parameters
                        sequence += char
                        if char.isdigit():
                            current += char
                        else:
                            # Parameters are clamped to 9999
                            params.append(min(int(current or 0), 9999))
                            if char == ";":
                                current = ""
                        char = yield
                    if current:
                        params.append(min(int(current or 0), 9999))
                    # then any number of "intermediate bytes"
                    while 0x20 <= ord(char) <= 0x2F:
                        sequence += char
                        char = yield
                    # finally by a single "final byte"
                    if 0x40 <= ord(char) <= 0x7E:
                        sequence += char
                        # Check if that escape sequence was a style:
                        if char == "m":
                            self._select_graphic_rendition(params)
                            style = self._create_style_string()
                        # Otherwise print a zero-width control sequence
                        else:
                            formatted_text.append(("[ZeroWidthEscape]", sequence))
                    continue

            # Plain characters (and unhandled sequences) are emitted with the
            # currently active style
            formatted_text.append((style, sequence))
def strip_one_trailing_newline(ft: StyleAndTextTuples) -> StyleAndTextTuples:
    """Remove up to one trailing new-line character from formatted text.

    The fragment list is modified in place and also returned. Empty fragments
    at the tail are skipped; the first non-empty fragment is then either
    dropped entirely (if it is exactly ``"\\n"``) or has one trailing newline
    character stripped - after which scanning stops, so at most one newline
    is ever removed.

    Args:
        ft: The formatted text to trim

    Returns:
        The trimmed formatted text (same list object)
    """
    for i in range(len(ft) - 1, -1, -1):
        frag = ft[i]
        if not frag[1]:
            # Skip zero-length fragments at the tail
            continue
        if frag[1] == "\n":
            del ft[i]
        elif frag[1].endswith("\n"):
            ft[i] = (frag[0], frag[1][:-1])
        # FIX: always stop after the first non-empty fragment - previously
        # the break only applied to the "endswith" branch, so deleting a bare
        # "\n" fragment let the loop continue and strip further newlines.
        break
    return ft
def detect_lexer(
    text: str = "", path: Path | None = None, language: str = ""
) -> PygmentsLexerCls | None:
    """Detect the pygments lexer for a file.

    Tries each detection strategy in turn, most specific first, returning
    ``None`` when every strategy fails.
    """
    if path is not None:
        # 1. The file name alone is the most reliable signal
        try:
            return get_lexer_for_filename(path)
        except ClassNotFound:
            pass
        # 2. Guess from the file name combined with the text
        try:
            return guess_lexer_for_filename(path, text)
        except ClassNotFound:
            pass
    # 3. An explicitly supplied language name
    if language:
        try:
            return get_lexer_by_name(language)
        except ClassNotFound:
            pass
    # 4. Last resort: guess from the text alone
    try:
        return guess_lexer(text)
    except ClassNotFound:
        return None
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Lazily computed attributes, set on first access
    _pixel_size: tuple[int | None, int | None]
    _hash: str
    _root: ReferenceType[Datum]

    # Registry of sized datum references, keyed by the hash of (datum, size)
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    # NOTE(review): several methods below (`_cleanup_datum_sizes`, `get_hash`,
    # `hash`, `root`, `pixel_size`, `cell_size`, `get_size`) read as though
    # they lost `@classmethod` / `@staticmethod` / `@property` decorators
    # during extraction - confirm against the upstream source.

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data."""
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colors are stored as plain strings
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Weak reference to the datum this one was converted from (or itself)
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Cache of conversion outputs keyed by
        # (format, cols, rows, fg, bg, extend)
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Drop size-registry entries once this datum is garbage collected
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        return f"{self.__class__.__name__}(format={self.format!r})"

    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        size_instances = cls._sizes
        # Iterate over a copy, as entries are deleted during iteration
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Drop the strong reference created by dereferencing
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: serialize the plain text
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            return b"Error"

    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        # sha1 is used for speed, not security
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        try:
            return self._hash
        except AttributeError:
            # Compute once and cache on the instance
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            return self._root() or self
        except AttributeError:
            # Walk the chain of weak source references to the original datum
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result."""
        if to == self.format:
            # TODO - crop
            return self.data
        # Fall back to the application's palette colors when none are given
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]

        # Candidate chains of converters leading from this format to `to`
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                # Apply each conversion stage in the route in turn
                for stage_a, stage_b in zip(route, route[1:]):
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate output so the next stage can
                        # trace its provenance back to this datum
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the shared event loop and block until complete
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to a image.
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale any missing dimension to preserve aspect ratio
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to a image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                # Fall back to a nominal 10x20px cell when no terminal info
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class`Datum`."""
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and it's size by its key."""
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            # Only return the entry if the datum is still alive
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `ansi_to_ft` function. Write a Python function `async def ansi_to_ft( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, lex: bool = False, ) -> StyleAndTextTuples` to solve the following problem:
Convert ANSI text to formatted text, lexing & formatting automatically.
Here is the function:
async def ansi_to_ft(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
    lex: bool = False,
) -> StyleAndTextTuples:
    """Convert ANSI text to formatted text, lexing & formatting automatically.

    Args:
        datum: The datum containing the ANSI text to convert
        cols: The width in terminal columns (unused; converter-API parity)
        rows: The height in terminal rows (unused; converter-API parity)
        fg: The foreground color (unused; converter-API parity)
        bg: The background color (unused; converter-API parity)
        extend: Whether to pad the output (unused; converter-API parity)
        lex: Whether plain text may be syntax-highlighted with a lexer

    Returns:
        The converted formatted-text fragments, with up to one trailing
        newline stripped
    """
    data = datum.data
    markup = data.decode() if isinstance(data, bytes) else data
    ft: StyleAndTextTuples
    if "\x1b" in markup or "\r" in markup:
        # Escape sequences or carriage returns present: parse as ANSI
        ft = to_formatted_text(ANSI(markup.strip()))
    else:
        # Replace tabs with spaces
        markup = markup.expandtabs()
        # Only lex plain text with explicitly whitelisted lexers
        if (
            lex
            and (lexer := detect_lexer(markup, path=datum.path)) is not None
            and lexer.name in _WHITELISTED_LEXERS
        ):
            from prompt_toolkit.lexers.pygments import _token_cache

            log.debug('Lexing output using "%s" lexer', lexer.name)
            ft = [
                (_token_cache[t], v)
                for _, t, v in lexer.get_tokens_unprocessed(markup)
            ]
        else:
            ft = to_formatted_text(markup)
    # FIX: removed dataset-extraction residue that was fused onto this line
    return to_formatted_text(strip_one_trailing_newline(ft))
from __future__ import annotations
from typing import TYPE_CHECKING
from euporie.core.convert.registry import register
from euporie.core.filters import have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
    """Get the dimensions of the displayable data in pixels.

    The result is computed once and cached on the instance in
    ``self._pixel_size``. When a dimension is unknown, the data is
    converted to PNG (if a conversion route exists) and inspected with
    ``imagesize``.

    Returns:
        A tuple of the data's width and height in pixels; either element
        may be ``None`` if it could not be determined.
    """
    try:
        # Fast path: a previous call already cached the result.
        return self._pixel_size
    except AttributeError:
        px, py = self.px, self.py
        # Do not bother trying if the format is ANSI
        if self.format != "ansi" and (px is None or py is None):
            # Try using imagesize to get the size of the output
            if (
                self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
            ):
                # Not a format imagesize can read directly, but a
                # conversion route to PNG exists: convert first.
                data = await self.convert_async(to="png")
            else:
                data = self.data
            if isinstance(data, str):
                data = data.encode()
            try:
                px_calc, py_calc = imagesize.get(io.BytesIO(data))
            except ValueError:
                # imagesize could not parse the data; keep px/py as-is.
                pass
            else:
                # Fill in missing dimensions, preserving the aspect ratio
                # when the other dimension is already known.
                if px is None and px_calc > 0:
                    if py is not None and py_calc > 0:
                        px = px_calc * py / py_calc
                    else:
                        px = px_calc
                if py is None and py_calc > 0:
                    if px is not None and px_calc > 0:
                        py = py_calc * px / px_calc
                    else:
                        py = py_calc
        self._pixel_size = (px, py)
        return self._pixel_size
def pixel_size(self) -> Any:
    """Get the data's pixel dimensions, blocking until available.

    A synchronous wrapper around :py:meth:`pixel_size_async`.
    """
    coro = self.pixel_size_async()
    return self._to_sync(coro)
async def cell_size_async(self) -> tuple[int, float]:
    """Get the cell width and aspect ratio of the displayable data.

    The result is cached on the instance in ``self._cell_size``.

    Returns:
        A tuple of the data's width in terminal columns and its aspect ratio,
        when converted to an image. ``(0, 0.0)`` if the pixel size is unknown.
    """
    if self._cell_size is None:
        cols, aspect = 0, 0.0
        px, py = await self.pixel_size_async()
        if px is not None and py is not None:
            app = get_app()
            if hasattr(app, "term_info"):
                # Use the terminal's reported cell size in pixels.
                cell_px, cell_py = app.term_info.cell_size_px
            else:
                # Fallback cell size in pixels when no terminal info exists.
                cell_px, cell_py = 10, 20
            cols = max(1, int(px // cell_px))
            # Height-to-width ratio, measured in terminal cells.
            aspect = (py / cell_py) / (px / cell_px)
        self._cell_size = cols, aspect
    return self._cell_size
def cell_size(self) -> Any:
    """Get the data's cell width and aspect ratio, blocking until available.

    A synchronous wrapper around :py:meth:`cell_size_async`.
    """
    coro = self.cell_size_async()
    return self._to_sync(coro)
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
    """Store a size for a :py:class:`Datum`.

    Only a weak reference to the datum is kept, so stored sizes do not keep
    the datum alive (stale entries are purged by ``_cleanup_datum_sizes``).

    Args:
        size: The size to associate with this datum.

    Returns:
        A string key under which the (datum, size) pair can later be
        retrieved with :py:meth:`get_size`.
    """
    sized_datum = (ref(self), Size(*size))
    key = str(hash(sized_datum))
    self._sizes[key] = sized_datum
    return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
    """Retrieve a :py:class:`Datum` and its size by its key.

    Args:
        key: A key previously returned by :py:meth:`add_size`.

    Returns:
        The (datum, size) pair, or ``None`` if the key is unknown or the
        datum has since been garbage collected.
    """
    # NOTE(review): receives `cls` — presumably decorated @classmethod in
    # the original source and lost in extraction; confirm upstream.
    if sized_datum := cls._sizes.get(key):
        datum_ref, size = sized_datum
        if datum := datum_ref():
            return datum, size
    return None
The provided code snippet includes necessary dependencies for implementing the `markdown_to_rich_py` function. Write a Python function `async def markdown_to_rich_py( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> Markdown` to solve the following problem:
Convert markdown source data to a rich ``Markdown`` renderable.
Here is the function:
async def markdown_to_rich_py(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> Markdown:
    """Convert markdown source data to a rich ``Markdown`` renderable.

    The previous docstring ("Convert base64 encoded data to bytes.") was a
    copy-paste error: this converter wraps markdown text for rich rendering.

    Args:
        datum: The datum whose data (markdown source as ``str`` or ``bytes``)
            should be converted.
        cols: Unused; accepted for converter-interface compatibility.
        rows: Unused; accepted for converter-interface compatibility.
        fg: Unused; accepted for converter-interface compatibility.
        bg: Unused; accepted for converter-interface compatibility.
        extend: Unused; accepted for converter-interface compatibility.

    Returns:
        A :py:class:`rich.markdown.Markdown` wrapping the source text.
    """
    from rich.markdown import Markdown

    data = datum.data
    markup = data.decode() if isinstance(data, bytes) else data
    return Markdown(
        markup,
        # code_theme=str(config.syntax_theme),
        # inline_code_theme=str(config.syntax_theme),
    )
6,822 | from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import (
chafa_convert_cmd,
chafa_convert_py,
imagemagick_convert,
)
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
async def call_subproc(
    data: str | bytes,
    cmd: list[Any],
    use_tempfile: bool = False,
    suffix: str = "",
) -> bytes:
    """Call the command as a subprocess and return its output as bytes.

    Args:
        data: The data to pass to the subprocess
        cmd: The command and arguments to call
        use_tempfile: If True, the command saves its output to a file, not stdout
        suffix: Suffix for the temporary file name

    Returns:
        The data printed to standard out by the subprocess, or a styled
        "Rendering Error" escape sequence if the command could not be run.
    """
    # Convert all command arguments to strings
    cmd = list(map(str, cmd))
    # Convert data to bytes
    if isinstance(data, str):
        data = data.encode()
    if use_tempfile:
        # If the command cannot read from stdin, create a temporary file to pass to
        # the command; delete=False so the subprocess can open it by name.
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tfile.write(data)
        tfile.close()
        cmd.append(tfile.name)
        stdinput = None
    else:
        stdinput = data
    log.debug("Running external command `%s`", cmd)
    error: Exception | None = None
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        output_bytes, _ = await proc.communicate(stdinput)
    except FileNotFoundError as error_:
        # The executable was not found on PATH.
        log.error("Could not run external command `%s`", cmd)
        error = error_
    except subprocess.CalledProcessError as error_:
        # NOTE(review): `communicate()` does not raise CalledProcessError;
        # presumably defensive — confirm against the original source.
        log.error("There was an error while running external command `%s`", cmd)
        error = error_
    finally:
        if error is not None:
            # Generate an output stating there was an error
            output_bytes = (
                b"\x1b[33m"  # Set fg to yellow
                b"\xee\x82\xb6"  # Draw left pill side
                b"\x1b[43m\x1b[30m"  # Set fg to black, bg to yellow
                b"\xe2\x9a\xa0"  # Draw warning symbol
                b" Rendering Error"
                b"\x1b[33m\x1b[49m"  # Set fg to yellow, reset bg
                b"\xee\x82\xb4"  # Draw right pill side
                b"\x1b[n"  # Reset style
                # NOTE(review): "\x1b[n" looks like a typo for the SGR reset
                # "\x1b[m" — confirm upstream before changing.
            )
        # TODO Log any stderr
        # Clean up any temporary file (tfile is only bound when
        # use_tempfile is True; close() on an already-closed file is a no-op)
        if use_tempfile:
            tfile.close()
            Path(tfile.name).unlink()
    return output_bytes
def get_app() -> BaseApp:
    """Return the currently running application, or a dummy ``BaseApp``."""
    from euporie.core.app import BaseApp

    app = _current_app_session.get().app
    if not isinstance(app, BaseApp):
        # Use a bare BaseApp as our "DummyApplication"
        app = BaseApp()
    return app
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `png_to_sixel_img2sixel` function. Write a Python function `async def png_to_sixel_img2sixel( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert PNG data to sixels :command:`img2sixel`.
Here is the function:
async def png_to_sixel_img2sixel(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert PNG data to sixels using :command:`img2sixel`.

    Args:
        datum: The datum containing the PNG bytes to convert.
        cols: Target width in terminal columns; translated to pixels using
            the terminal's cell width.
        rows: Unused; accepted for converter-interface compatibility.
        fg: Unused; accepted for converter-interface compatibility.
        bg: Background color passed to ``img2sixel`` via ``--bgcolor``.
        extend: Unused; accepted for converter-interface compatibility.

    Returns:
        The sixel escape sequence as a string.
    """
    cmd: list[Any] = ["img2sixel", "-I"]
    if bg:
        cmd += [f"--bgcolor={bg}"]
    if cols is not None:
        # Scale the requested column count to pixels using the cell width.
        px, _ = get_app().term_info.cell_size_px
        cmd += [f"--width={int(cols * px)}"]
    return (await call_subproc(datum.data, cmd)).decode()
6,823 | from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import (
chafa_convert_cmd,
chafa_convert_py,
imagemagick_convert,
)
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `pil_to_sixel_py_timg` function. Write a Python function `async def pil_to_sixel_py_timg( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert a pillow image to sixels :py:mod:`timg`.
Here is the function:
async def pil_to_sixel_py_timg(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert a pillow image to sixels using :py:mod:`timg`.

    Args:
        datum: The datum whose data is a PIL image to convert.
        cols: Unused; accepted for converter-interface compatibility.
        rows: Unused; accepted for converter-interface compatibility.
        fg: Unused; accepted for converter-interface compatibility.
        bg: Unused; accepted for converter-interface compatibility.
        extend: Unused; accepted for converter-interface compatibility.

    Returns:
        The sixel escape sequence as a string.
    """
    import timg

    return timg.SixelMethod(datum.data).to_string()
6,824 | from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING
from euporie.core.convert.formats.common import (
chafa_convert_cmd,
chafa_convert_py,
imagemagick_convert,
)
from euporie.core.convert.registry import register
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
from euporie.core.filters import command_exists, have_modules
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `pil_to_sixel_py_teimpy` function. Write a Python function `async def pil_to_sixel_py_teimpy( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str` to solve the following problem:
Convert a pillow image to sixels :py:mod:`teimpy`.
Here is the function:
async def pil_to_sixel_py_teimpy(
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str:
    """Convert a pillow image to sixels using :py:mod:`teimpy`.

    Args:
        datum: The datum whose data is a PIL image to convert.
        cols: Unused; accepted for converter-interface compatibility.
        rows: Unused; accepted for converter-interface compatibility.
        fg: Unused; accepted for converter-interface compatibility.
        bg: Unused; accepted for converter-interface compatibility.
        extend: Unused; accepted for converter-interface compatibility.

    Returns:
        The sixel escape sequence as a string.
    """
    import numpy as np
    import teimpy

    # teimpy draws from a numpy array, so convert the PIL image first.
    return teimpy.get_drawer(teimpy.Mode.SIXEL).draw(np.asarray(datum.data))
6,825 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.registry import register
from euporie.core.filters import have_modules
log = logging.getLogger(__name__)
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `png_to_pil_py` function. Write a Python function `async def png_to_pil_py( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> PilImage` to solve the following problem:
Convert PNG to a pillow image using :py:mod:`PIL`.
Here is the function:
async def png_to_pil_py(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> PilImage:
"""Convert PNG to a pillow image using :py:mod:`PIL`."""
import io
from PIL import Image
try:
image = Image.open(io.BytesIO(datum.data))
image.load()
except OSError:
log.error("Could not load image.")
return Image.new(mode="P", size=(1, 1))
else:
return image | Convert PNG to a pillow image using :py:mod:`PIL`. |
6,826 | from __future__ import annotations
import base64
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
import base64
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `base64_to_bytes_py` function. Write a Python function `async def base64_to_bytes_py( datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> bytes` to solve the following problem:
Convert base64 encoded data to bytes.
Here is the function:
async def base64_to_bytes_py(
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> bytes:
"""Convert base64 encoded data to bytes."""
data = datum.data
data_str = data.decode() if isinstance(data, bytes) else data
return base64.b64decode(data_str) | Convert base64 encoded data to bytes. |
6,827 | from __future__ import annotations
import base64
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
tfile.close()
Path(tfile.name).unlink()
return output_bytes
def get_app() -> BaseApp:
"""Get the current active (running) Application."""
from euporie.core.app import BaseApp
session = _current_app_session.get()
if isinstance(session.app, BaseApp):
return session.app
# Use a baseapp as our "DummyApplication"
return BaseApp()
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
bbox: DiInt | None = None,
) -> Any:
"""Perform conversion asynchronously, caching the result."""
if to == self.format:
# TODO - crop
return self.data
if not fg and hasattr(app := get_app(), "color_palette"):
fg = self.fg or app.color_palette.fg.base_hex
if not bg and hasattr(app := get_app(), "color_palette"):
bg = self.bg or app.color_palette.bg.base_hex
if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
return self._conversions[key]
routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
# log.debug(
# "Converting from '%s' to '%s' using route: %s", self, to, routes
# )
output: T | None = None
if routes:
datum = self
output = None
for route in routes:
for stage_a, stage_b in zip(route, route[1:]):
key = (stage_b, cols, rows, fg, bg, extend)
if key in self._conversions:
output = self._conversions[key]
else:
# Find converter with lowest weight
func = sorted(
[
conv
for conv in converters[stage_b][stage_a]
if _FILTER_CACHE.get((conv,), conv.filter_)
],
key=lambda x: x.weight,
)[0].func
try:
output = await func(datum, cols, rows, fg, bg, extend)
self._conversions[key] = output
except Exception:
log.exception("An error occurred during format conversion")
output = None
if output is None:
log.error(
"Failed to convert `%s`"
" to `%s` using route `%s` at stage `%s`",
self,
to,
route,
stage_b,
)
# Try the next route on error
break
if stage_b != route[-1]:
datum = Datum(
data=output,
format=stage_b,
px=self.px,
py=self.py,
fg=fg,
bg=bg,
path=self.path,
source=datum,
)
else:
# If this route succeeded, stop trying routes
break
# Crop or pad output
# if bbox and any(bbox):
if output is None:
output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
return output
def _to_sync(self, coro: Coroutine) -> Any:
"""Call an async method synchronously."""
future = asyncio.run_coroutine_threadsafe(coro, get_loop())
return future.result()
def convert(
self,
to: str,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> Any:
"""Convert between formats."""
return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
"""Get the dimensions of displayable data in pixels.
Foreground and background color are set at this point if they are available, as
data conversion outputs are cached and re-used.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
try:
return self._pixel_size
except AttributeError:
px, py = self.px, self.py
# Do not bother trying if the format is ANSI
if self.format != "ansi" and (px is None or py is None):
# Try using imagesize to get the size of the output
if (
self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
):
data = await self.convert_async(to="png")
else:
data = self.data
if isinstance(data, str):
data = data.encode()
try:
px_calc, py_calc = imagesize.get(io.BytesIO(data))
except ValueError:
pass
else:
if px is None and px_calc > 0:
if py is not None and py_calc > 0:
px = px_calc * py / py_calc
else:
px = px_calc
if py is None and py_calc > 0:
if px is not None and px_calc > 0:
py = py_calc * px / px_calc
else:
py = py_calc
self._pixel_size = (px, py)
return self._pixel_size
def pixel_size(self) -> Any:
"""Get data dimensions synchronously."""
return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
"""Get the cell width and aspect ratio of the displayable data.
Returns:
A tuple of the data's width in terminal columns and its aspect ratio, when
converted to a image.
"""
if self._cell_size is None:
cols, aspect = 0, 0.0
px, py = await self.pixel_size_async()
if px is not None and py is not None:
app = get_app()
if hasattr(app, "term_info"):
cell_px, cell_py = app.term_info.cell_size_px
else:
cell_px, cell_py = 10, 20
cols = max(1, int(px // cell_px))
aspect = (py / cell_py) / (px / cell_px)
self._cell_size = cols, aspect
return self._cell_size
def cell_size(self) -> Any:
"""Get cell width and aspect synchronously."""
return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
"""Store a size for a :py:class`Datum`."""
sized_datum = (ref(self), Size(*size))
key = str(hash(sized_datum))
self._sizes[key] = sized_datum
return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
"""Retrieve a :py:class:`Datum` and it's size by its key."""
if sized_datum := cls._sizes.get(key):
datum_ref, size = sized_datum
if datum := datum_ref():
return datum, size
return None
The provided code snippet includes necessary dependencies for implementing the `imagemagick_convert` function. Write a Python function `async def imagemagick_convert( output_format: str, datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str | bytes` to solve the following problem:
Convert image data to PNG bytes using ``imagemagick``.
Here is the function:
async def imagemagick_convert(
output_format: str,
datum: Datum,
cols: int | None = None,
rows: int | None = None,
fg: str | None = None,
bg: str | None = None,
extend: bool = True,
) -> str | bytes:
"""Convert image data to PNG bytes using ``imagemagick``."""
cmd: list[Any] = ["convert", "-density", "300"]
app = get_app()
if cols is not None and hasattr(app, "term_info"):
px, _ = app.term_info.cell_size_px
cmd += ["-geometry", f"{int(cols * px)}"]
if bg:
cmd += ["-background", bg, "-flatten"]
cmd += ["-[0]", f"{output_format}:-"]
result: bytes | str = await call_subproc(datum.data, cmd)
if output_format in {"sixel", "svg"} and isinstance(result, bytes):
result = result.decode()
return result | Convert image data to PNG bytes using ``imagemagick``. |
6,828 | from __future__ import annotations
import base64
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
async def call_subproc(
data: str | bytes,
cmd: list[Any],
use_tempfile: bool = False,
suffix: str = "",
) -> bytes:
"""Call the command as a subprocess and return it's output as bytes.
Args:
data: The data to pass to the subprocess
cmd: The command and arguments to call
use_tempfile: If True, the command saves its output to a file, not stdout
suffix: Suffix for the temporary file name
Returns:
The data printed to standard out by the subprocess.
"""
# Convert all command arguments to strings
cmd = list(map(str, cmd))
# Convert data to bytes
if isinstance(data, str):
data = data.encode()
if use_tempfile:
# If the command cannot read from stdin, create a temporary file to pass to
# the command
tfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
tfile.write(data)
tfile.close()
cmd.append(tfile.name)
stdinput = None
else:
stdinput = data
log.debug("Running external command `%s`", cmd)
error: Exception | None = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
output_bytes, _ = await proc.communicate(stdinput)
except FileNotFoundError as error_:
log.error("Could not run external command `%s`", cmd)
error = error_
except subprocess.CalledProcessError as error_:
log.error("There was an error while running external command `%s`", cmd)
error = error_
finally:
if error is not None:
# Generate an output stating there was an error
output_bytes = (
b"\x1b[33m" # Set fg to yellow
b"\xee\x82\xb6" # Draw left pill side
b"\x1b[43m\x1b[30m" # Set fg to black, bg to yellow
b"\xe2\x9a\xa0" # Draw warning symbol
b" Rendering Error"
b"\x1b[33m\x1b[49m" # Set fg to yellow, reset bg
b"\xee\x82\xb4" # Draw right pill side
b"\x1b[n" # Reset style
)
# TODO Log any stderr
# Clean up any temporary file
if use_tempfile:
tfile.close()
Path(tfile.name).unlink()
return output_bytes
class Datum(Generic[T], metaclass=_MetaDatum):
"""Class for storing and converting display data."""
_pixel_size: tuple[int | None, int | None]
_hash: str
_root: ReferenceType[Datum]
_sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}
def __init__(
self,
data: T,
format: str,
px: int | None = None,
py: int | None = None,
fg: str | ColorPaletteColor | None = None,
bg: str | ColorPaletteColor | None = None,
path: Path | None = None,
source: Datum | None = None,
align: WindowAlign = WindowAlign.LEFT,
) -> None:
"""Create a new instance of display data."""
self.data: T = data
self.format = format
self.px, self.py = px, py
self.fg = str(fg) if fg is not None else None
self.bg = str(bg) if bg is not None else None
self.path = path
self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
self.align = align
self._cell_size: tuple[int, float] | None = None
self._conversions: dict[
tuple[str, int | None, int | None, str | None, str | None, bool], T | None
] = {}
self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
self._finalizer.atexit = False
def __repr__(self) -> str:
"""Return a string representation of object."""
return f"{self.__class__.__name__}(format={self.format!r})"
def _cleanup_datum_sizes(cls, data_hash: str) -> None:
"""Remove all sizes for a given datum hash."""
size_instances = cls._sizes
for key, (datum_ref, _size) in list(size_instances.items()):
datum = datum_ref()
if not datum or datum.hash == data_hash:
del size_instances[key]
del datum
def to_bytes(self) -> bytes:
"""Cast the data to bytes."""
data = self.data
if isinstance(data, str):
return data.encode()
elif isinstance(data, list):
return to_plain_text(data).encode()
elif isinstance(data, PilImage):
return data.tobytes()
elif isinstance(data, bytes):
return data
else:
return b"Error"
def get_hash(data: Any) -> str:
"""Calculate a hash of data."""
if isinstance(data, str):
hash_data = data.encode()
elif isinstance(data, PilImage):
hash_data = data.tobytes()
else:
hash_data = data
return hashlib.sha1(hash_data).hexdigest() # noqa S324
def hash(self) -> str:
"""Return a hash of the `Datum`'s data."""
try:
return self._hash
except AttributeError:
value = self.get_hash(self.to_bytes())
self._hash = value
return value
def root(self) -> Datum:
"""Retrieve the source datum of any conversion outputs."""
try:
return self._root() or self
except AttributeError:
root = self
while True:
if (source := root.source()) == root or source is None:
break
else:
root = source
self._root = ref(root or self)
return root
async def convert_async(
    self,
    to: str,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
    bbox: DiInt | None = None,
) -> Any:
    """Perform conversion asynchronously, caching the result.

    Args:
        to: The name of the target format.
        cols: The target width in terminal columns, if known.
        rows: The target height in terminal rows, if known.
        fg: The foreground color to use for the conversion.
        bg: The background color to use for the conversion.
        extend: Passed through to each converter stage.
        bbox: Currently unused (cropping is not yet implemented — see the
            commented-out code below).

    Returns:
        The converted data, or an entry from ``ERROR_OUTPUTS`` on failure.
    """
    if to == self.format:
        # TODO - crop
        return self.data
    # Fill in missing colors from the app's palette so cached conversion
    # outputs are keyed on concrete color values.
    if not fg and hasattr(app := get_app(), "color_palette"):
        fg = self.fg or app.color_palette.fg.base_hex
    if not bg and hasattr(app := get_app(), "color_palette"):
        bg = self.bg or app.color_palette.bg.base_hex
    if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
        return self._conversions[key]
    routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
    # log.debug(
    #     "Converting from '%s' to '%s' using route: %s", self, to, routes
    # )
    output: T | None = None
    if routes:
        datum = self
        output = None
        for route in routes:
            for stage_a, stage_b in zip(route, route[1:]):
                # NOTE: ``key`` is re-bound here per stage, shadowing the
                # whole-conversion key computed above.
                key = (stage_b, cols, rows, fg, bg, extend)
                if key in self._conversions:
                    output = self._conversions[key]
                else:
                    # Find converter with lowest weight
                    func = sorted(
                        [
                            conv
                            for conv in converters[stage_b][stage_a]
                            if _FILTER_CACHE.get((conv,), conv.filter_)
                        ],
                        key=lambda x: x.weight,
                    )[0].func
                    try:
                        output = await func(datum, cols, rows, fg, bg, extend)
                        self._conversions[key] = output
                    except Exception:
                        log.exception("An error occurred during format conversion")
                        output = None
                if output is None:
                    log.error(
                        "Failed to convert `%s`"
                        " to `%s` using route `%s` at stage `%s`",
                        self,
                        to,
                        route,
                        stage_b,
                    )
                    # Try the next route on error
                    break
                if stage_b != route[-1]:
                    # Wrap the intermediate result so the next stage converts
                    # from it rather than from the original datum.
                    datum = Datum(
                        data=output,
                        format=stage_b,
                        px=self.px,
                        py=self.py,
                        fg=fg,
                        bg=bg,
                        path=self.path,
                        source=datum,
                    )
            else:
                # If this route succeeded, stop trying routes
                break
    # Crop or pad output
    # if bbox and any(bbox):
    if output is None:
        output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
    return output
def _to_sync(self, coro: Coroutine) -> Any:
    """Call an async method synchronously."""
    # Schedule the coroutine on the dedicated conversion IO loop (running on
    # a background thread) and block until it completes.
    future = asyncio.run_coroutine_threadsafe(coro, get_loop())
    return future.result()
def convert(
    self,
    to: str,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> Any:
    """Convert between formats.

    Synchronous wrapper around :py:meth:`convert_async`; blocks until the
    conversion completes on the background IO loop.
    """
    return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))
async def pixel_size_async(self) -> tuple[int | None, int | None]:
    """Get the dimensions of displayable data in pixels.

    Foreground and background color are set at this point if they are
    available, as data conversion outputs are cached and re-used.

    Returns:
        A tuple of the data's pixel width and pixel height; either value may
        be ``None`` if it could not be determined.
    """
    try:
        return self._pixel_size
    except AttributeError:
        px, py = self.px, self.py
        # Do not bother trying if the format is ANSI
        if self.format != "ansi" and (px is None or py is None):
            # Try using imagesize to get the size of the output
            if (
                self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
            ):
                # Not directly measurable but convertible to PNG: measure that.
                data = await self.convert_async(to="png")
            else:
                data = self.data
            if isinstance(data, str):
                data = data.encode()
            try:
                px_calc, py_calc = imagesize.get(io.BytesIO(data))
            except ValueError:
                pass
            else:
                # Scale measured dimensions so the aspect ratio implied by any
                # explicitly-provided dimension is preserved.
                if px is None and px_calc > 0:
                    if py is not None and py_calc > 0:
                        px = px_calc * py / py_calc
                    else:
                        px = px_calc
                if py is None and py_calc > 0:
                    if px is not None and px_calc > 0:
                        py = py_calc * px / px_calc
                    else:
                        py = py_calc
        # Memoize (including the all-None case) so measurement runs once.
        self._pixel_size = (px, py)
        return self._pixel_size
def pixel_size(self) -> Any:
    """Get data dimensions synchronously."""
    # Blocks on the background conversion IO loop.
    return self._to_sync(self.pixel_size_async())
async def cell_size_async(self) -> tuple[int, float]:
    """Get the cell width and aspect ratio of the displayable data.

    Returns:
        A tuple of the data's width in terminal columns and its aspect ratio,
        when converted to a image.
    """
    if self._cell_size is None:
        # Defaults used when the pixel size is unknown.
        cols, aspect = 0, 0.0
        px, py = await self.pixel_size_async()
        if px is not None and py is not None:
            app = get_app()
            if hasattr(app, "term_info"):
                cell_px, cell_py = app.term_info.cell_size_px
            else:
                # Fallback cell size when no terminal info is available.
                cell_px, cell_py = 10, 20
            cols = max(1, int(px // cell_px))
            # NOTE(review): assumes px > 0 when not None (guaranteed by
            # pixel_size_async's ``> 0`` checks only when measured) — a px of
            # 0 here would raise ZeroDivisionError; confirm.
            aspect = (py / cell_py) / (px / cell_px)
        self._cell_size = cols, aspect
    return self._cell_size
def cell_size(self) -> Any:
    """Get cell width and aspect synchronously."""
    # Blocks on the background conversion IO loop.
    return self._to_sync(self.cell_size_async())
# def crop(self, bbox: DiInt) -> T:
# """Crop displayable data."""
def add_size(self, size: tuple[int, int] | Size) -> str:
    """Store a size for a :py:class:`Datum`.

    Returns:
        A string key under which the (datum, size) pair was registered.
    """
    # Only a weak reference to the datum is kept; entries are purged by
    # ``_cleanup_datum_sizes`` when the datum is garbage-collected.
    sized_datum = (ref(self), Size(*size))
    key = str(hash(sized_datum))
    self._sizes[key] = sized_datum
    return key
def get_size(cls, key: str) -> tuple[Datum, Size] | None:
    """Retrieve a :py:class:`Datum` and its size by its key.

    Returns ``None`` if the key is unknown or the datum has been
    garbage-collected.
    """
    # NOTE(review): takes ``cls`` — presumably a ``@classmethod`` in the full
    # source (decorators are not visible in this fragment); confirm.
    if sized_datum := cls._sizes.get(key):
        datum_ref, size = sized_datum
        if datum := datum_ref():
            return datum, size
    return None
The provided code snippet includes necessary dependencies for implementing the `chafa_convert_cmd` function. Write a Python function `async def chafa_convert_cmd( output_format: str, datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str | bytes` to solve the following problem:
Convert image data to ANSI text using :command:`chafa`.
Here is the function:
async def chafa_convert_cmd(
    output_format: str,
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str | bytes:
    """Convert image data to ANSI text using :command:`chafa`.

    Args:
        output_format: The chafa output format (e.g. ``symbols``, ``sixels``).
        cols: The target width in terminal columns, if known.
        rows: The target height in terminal rows, if known.
        fg: The foreground color to use, if known.
        bg: The background color to use, if known.
        datum: The datum whose data is piped to the subprocess.
        extend: Accepted for converter-signature compatibility; unused here.

    Returns:
        The decoded output produced by ``chafa``.
    """
    cmd: list[Any] = ["chafa", f"--format={output_format}"]
    if cols is not None or rows is not None:
        # chafa accepts "--size=COLSxROWS"; either dimension may be omitted.
        size = "--size="
        if cols is not None:
            size = f"{size}{cols}"
        if rows is not None:
            size = f"{size}x{rows}"
        cmd.append(size)
    # Fix: previously only the background color was forwarded; forward the
    # foreground color too, mirroring the background handling (chafa's
    # ``--fg`` option).
    if fg:
        cmd += ["--fg", fg]
    if bg:
        cmd += ["--bg", bg]
    # NOTE(review): no input-file argument is passed (the "--stretch
    # /dev/stdin" line below is commented out) — presumably ``call_subproc``
    # pipes ``datum.data`` via stdin and chafa reads it; confirm.
    # cmd += ["--stretch", "/dev/stdin"]
    return (await call_subproc(datum.data, cmd)).decode()
6,829 | from __future__ import annotations
import base64
import logging
from typing import TYPE_CHECKING
from euporie.core.convert.utils import call_subproc
from euporie.core.current import get_app
def get_app() -> BaseApp:
    """Get the current active (running) Application."""
    # Imported here to avoid a circular import at module load time.
    from euporie.core.app import BaseApp

    # NOTE(review): ``_current_app_session`` is presumably prompt_toolkit's
    # current application session, imported elsewhere in the module — confirm.
    session = _current_app_session.get()
    if isinstance(session.app, BaseApp):
        return session.app
    # Use a baseapp as our "DummyApplication"
    return BaseApp()
class Datum(Generic[T], metaclass=_MetaDatum):
    """Class for storing and converting display data."""

    # Lazily-computed caches — set on first access, hence declared without
    # defaults here.
    _pixel_size: tuple[int | None, int | None]
    _hash: str
    _root: ReferenceType[Datum]
    # Registry of (weakref-to-datum, Size) pairs keyed by hash string.
    _sizes: ClassVar[dict[str, tuple[ReferenceType[Datum], Size]]] = {}

    def __init__(
        self,
        data: T,
        format: str,
        px: int | None = None,
        py: int | None = None,
        fg: str | ColorPaletteColor | None = None,
        bg: str | ColorPaletteColor | None = None,
        path: Path | None = None,
        source: Datum | None = None,
        align: WindowAlign = WindowAlign.LEFT,
    ) -> None:
        """Create a new instance of display data.

        Args:
            data: The display data payload.
            format: The name of the data's format.
            px: The data's pixel width, if known.
            py: The data's pixel height, if known.
            fg: The foreground color associated with the data.
            bg: The background color associated with the data.
            path: The file path the data originated from, if any.
            source: The datum this one was converted from, if any.
            align: The horizontal alignment to use when displaying.
        """
        self.data: T = data
        self.format = format
        self.px, self.py = px, py
        # Colors are normalized to plain strings.
        self.fg = str(fg) if fg is not None else None
        self.bg = str(bg) if bg is not None else None
        self.path = path
        # Keep only a weak reference to the source; a datum with no source
        # refers to itself.
        self.source: ReferenceType[Datum] = ref(source) if source else ref(self)
        self.align = align
        self._cell_size: tuple[int, float] | None = None
        # Conversion cache keyed by (to, cols, rows, fg, bg, extend).
        self._conversions: dict[
            tuple[str, int | None, int | None, str | None, str | None, bool], T | None
        ] = {}
        # Purge stored sizes when this instance is garbage-collected; the
        # finalizer must not run at interpreter exit.
        self._finalizer = finalize(self, self._cleanup_datum_sizes, self.hash)
        self._finalizer.atexit = False

    def __repr__(self) -> str:
        """Return a string representation of object."""
        # Only the format is shown: the payload itself may be large.
        return f"{self.__class__.__name__}(format={self.format!r})"

    def _cleanup_datum_sizes(cls, data_hash: str) -> None:
        """Remove all sizes for a given datum hash."""
        # NOTE(review): takes ``cls`` — presumably a ``@classmethod`` in the
        # full source (decorators not visible in this fragment); confirm.
        size_instances = cls._sizes
        # Iterate over a snapshot so entries can be deleted while looping.
        for key, (datum_ref, _size) in list(size_instances.items()):
            datum = datum_ref()
            if not datum or datum.hash == data_hash:
                del size_instances[key]
            # Release the strong reference taken by dereferencing the weakref.
            del datum

    def to_bytes(self) -> bytes:
        """Cast the data to bytes."""
        data = self.data
        if isinstance(data, str):
            return data.encode()
        elif isinstance(data, list):
            # Formatted-text fragments: flatten to plain text first.
            return to_plain_text(data).encode()
        elif isinstance(data, PilImage):
            return data.tobytes()
        elif isinstance(data, bytes):
            return data
        else:
            # Unknown payload type: stable sentinel so hashing still succeeds.
            return b"Error"

    def get_hash(data: Any) -> str:
        """Calculate a hash of data."""
        # NOTE(review): no ``self`` parameter — presumably a ``@staticmethod``
        # in the full source; confirm. SHA-1 is a cache key, not security.
        if isinstance(data, str):
            hash_data = data.encode()
        elif isinstance(data, PilImage):
            hash_data = data.tobytes()
        else:
            hash_data = data
        return hashlib.sha1(hash_data).hexdigest()  # noqa S324

    def hash(self) -> str:
        """Return a hash of the `Datum`'s data."""
        # Lazily computed, then memoized in ``self._hash``.
        # NOTE(review): presumably a ``@property`` in the full source (it is
        # passed as a plain value to ``finalize`` in ``__init__``); confirm.
        try:
            return self._hash
        except AttributeError:
            value = self.get_hash(self.to_bytes())
            self._hash = value
            return value

    def root(self) -> Datum:
        """Retrieve the source datum of any conversion outputs."""
        try:
            # Fast path: cached weak reference, falling back to self if the
            # referent has been garbage-collected.
            return self._root() or self
        except AttributeError:
            # Walk the ``source`` chain until a datum is its own source (or
            # the reference is dead).
            root = self
            while True:
                if (source := root.source()) == root or source is None:
                    break
                else:
                    root = source
            # Cache only a weak reference so the chain keeps nothing alive.
            self._root = ref(root or self)
            return root

    async def convert_async(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
        bbox: DiInt | None = None,
    ) -> Any:
        """Perform conversion asynchronously, caching the result."""
        if to == self.format:
            # TODO - crop
            return self.data
        # Fill in missing colors from the app's palette so cached outputs are
        # keyed on concrete color values.
        if not fg and hasattr(app := get_app(), "color_palette"):
            fg = self.fg or app.color_palette.fg.base_hex
        if not bg and hasattr(app := get_app(), "color_palette"):
            bg = self.bg or app.color_palette.bg.base_hex
        if (key := (to, cols, rows, fg, bg, extend)) in self._conversions:
            return self._conversions[key]
        routes = _CONVERTOR_ROUTE_CACHE[(self.format, to)]
        # log.debug(
        #     "Converting from '%s' to '%s' using route: %s", self, to, routes
        # )
        output: T | None = None
        if routes:
            datum = self
            output = None
            for route in routes:
                for stage_a, stage_b in zip(route, route[1:]):
                    # NOTE: ``key`` is re-bound per stage, shadowing the
                    # whole-conversion key computed above.
                    key = (stage_b, cols, rows, fg, bg, extend)
                    if key in self._conversions:
                        output = self._conversions[key]
                    else:
                        # Find converter with lowest weight
                        func = sorted(
                            [
                                conv
                                for conv in converters[stage_b][stage_a]
                                if _FILTER_CACHE.get((conv,), conv.filter_)
                            ],
                            key=lambda x: x.weight,
                        )[0].func
                        try:
                            output = await func(datum, cols, rows, fg, bg, extend)
                            self._conversions[key] = output
                        except Exception:
                            log.exception("An error occurred during format conversion")
                            output = None
                    if output is None:
                        log.error(
                            "Failed to convert `%s`"
                            " to `%s` using route `%s` at stage `%s`",
                            self,
                            to,
                            route,
                            stage_b,
                        )
                        # Try the next route on error
                        break
                    if stage_b != route[-1]:
                        # Wrap the intermediate result so the next stage
                        # converts from it rather than the original datum.
                        datum = Datum(
                            data=output,
                            format=stage_b,
                            px=self.px,
                            py=self.py,
                            fg=fg,
                            bg=bg,
                            path=self.path,
                            source=datum,
                        )
                else:
                    # If this route succeeded, stop trying routes
                    break
        # Crop or pad output
        # if bbox and any(bbox):
        if output is None:
            output = ERROR_OUTPUTS.get(to, "(Conversion Error)")
        return output

    def _to_sync(self, coro: Coroutine) -> Any:
        """Call an async method synchronously."""
        # Schedule on the dedicated conversion IO loop (background thread)
        # and block until complete.
        future = asyncio.run_coroutine_threadsafe(coro, get_loop())
        return future.result()

    def convert(
        self,
        to: str,
        cols: int | None = None,
        rows: int | None = None,
        fg: str | None = None,
        bg: str | None = None,
        extend: bool = True,
    ) -> Any:
        """Convert between formats."""
        # Synchronous wrapper around :py:meth:`convert_async`.
        return self._to_sync(self.convert_async(to, cols, rows, fg, bg, extend))

    async def pixel_size_async(self) -> tuple[int | None, int | None]:
        """Get the dimensions of displayable data in pixels.

        Foreground and background color are set at this point if they are
        available, as data conversion outputs are cached and re-used.

        Returns:
            A tuple of the data's pixel width and pixel height; either value
            may be ``None`` if it could not be determined.
        """
        try:
            return self._pixel_size
        except AttributeError:
            px, py = self.px, self.py
            # Do not bother trying if the format is ANSI
            if self.format != "ansi" and (px is None or py is None):
                # Try using imagesize to get the size of the output
                if (
                    self.format not in {"png", "svg", "jpeg", "gif", "tiff"}
                    and _CONVERTOR_ROUTE_CACHE[(self.format, "png")]
                ):
                    data = await self.convert_async(to="png")
                else:
                    data = self.data
                if isinstance(data, str):
                    data = data.encode()
                try:
                    px_calc, py_calc = imagesize.get(io.BytesIO(data))
                except ValueError:
                    pass
                else:
                    # Scale measured dimensions so any explicitly-provided
                    # dimension's implied aspect ratio is preserved.
                    if px is None and px_calc > 0:
                        if py is not None and py_calc > 0:
                            px = px_calc * py / py_calc
                        else:
                            px = px_calc
                    if py is None and py_calc > 0:
                        if px is not None and px_calc > 0:
                            py = py_calc * px / px_calc
                        else:
                            py = py_calc
            # Memoize (even if still unknown) so measurement runs once.
            self._pixel_size = (px, py)
            return self._pixel_size

    def pixel_size(self) -> Any:
        """Get data dimensions synchronously."""
        return self._to_sync(self.pixel_size_async())

    async def cell_size_async(self) -> tuple[int, float]:
        """Get the cell width and aspect ratio of the displayable data.

        Returns:
            A tuple of the data's width in terminal columns and its aspect
            ratio, when converted to a image.
        """
        if self._cell_size is None:
            cols, aspect = 0, 0.0
            px, py = await self.pixel_size_async()
            if px is not None and py is not None:
                app = get_app()
                if hasattr(app, "term_info"):
                    cell_px, cell_py = app.term_info.cell_size_px
                else:
                    # Fallback cell size when no terminal info is available.
                    cell_px, cell_py = 10, 20
                cols = max(1, int(px // cell_px))
                # NOTE(review): assumes px > 0 when not None — a px of 0 here
                # would raise ZeroDivisionError; confirm.
                aspect = (py / cell_py) / (px / cell_px)
            self._cell_size = cols, aspect
        return self._cell_size

    def cell_size(self) -> Any:
        """Get cell width and aspect synchronously."""
        return self._to_sync(self.cell_size_async())

    # def crop(self, bbox: DiInt) -> T:
    #     """Crop displayable data."""

    def add_size(self, size: tuple[int, int] | Size) -> str:
        """Store a size for a :py:class:`Datum`."""
        # Only a weak reference to the datum is kept; entries are purged by
        # ``_cleanup_datum_sizes`` when the datum is garbage-collected.
        sized_datum = (ref(self), Size(*size))
        key = str(hash(sized_datum))
        self._sizes[key] = sized_datum
        return key

    def get_size(cls, key: str) -> tuple[Datum, Size] | None:
        """Retrieve a :py:class:`Datum` and its size by its key."""
        # NOTE(review): takes ``cls`` — presumably a ``@classmethod`` in the
        # full source; confirm. Returns None for unknown keys or dead refs.
        if sized_datum := cls._sizes.get(key):
            datum_ref, size = sized_datum
            if datum := datum_ref():
                return datum, size
        return None
The provided code snippet includes necessary dependencies for implementing the `chafa_convert_py` function. Write a Python function `async def chafa_convert_py( output_format: Literal["symbols", "sixels", "kitty", "iterm2"], datum: Datum, cols: int | None = None, rows: int | None = None, fg: str | None = None, bg: str | None = None, extend: bool = True, ) -> str | bytes` to solve the following problem:
Convert image data to ANSI text using ::`chafa.py`.
Here is the function:
async def chafa_convert_py(
    output_format: Literal["symbols", "sixels", "kitty", "iterm2"],
    datum: Datum,
    cols: int | None = None,
    rows: int | None = None,
    fg: str | None = None,
    bg: str | None = None,
    extend: bool = True,
) -> str | bytes:
    """Convert image data to ANSI text using ::`chafa.py`.

    Args:
        output_format: The chafa pixel mode to render with.
        datum: The datum whose data (a PIL image) is converted.
        cols: The target width in terminal columns, if known.
        rows: The target height in terminal rows, if known.
        fg: The foreground color as a ``#RRGGBB`` hex string, if known.
        bg: The background color as a ``#RRGGBB`` hex string, if known.
        extend: Accepted for converter-signature compatibility; unused here.

    Returns:
        The rendered canvas output.
    """
    # Imported lazily: chafa.py is an optional dependency.
    from chafa.chafa import Canvas, CanvasConfig, PixelMode, PixelType
    from PIL import Image

    pil_mode_to_pixel_type = {
        "RGBa": PixelType.CHAFA_PIXEL_RGBA8_PREMULTIPLIED,
        "RGBA": PixelType.CHAFA_PIXEL_RGBA8_UNASSOCIATED,
        "RGB": PixelType.CHAFA_PIXEL_RGB8,
    }
    str_to_pixel_mode = {
        "symbols": PixelMode.CHAFA_PIXEL_MODE_SYMBOLS,
        "sixels": PixelMode.CHAFA_PIXEL_MODE_SIXELS,
        "kitty": PixelMode.CHAFA_PIXEL_MODE_KITTY,
        "iterm2": PixelMode.CHAFA_PIXEL_MODE_ITERM2,
    }
    # Convert PIL image to format that chafa can use
    data = datum.data
    # Always convert the image, as unconverted images sometimes result in
    # off-by-one line-width errors causing diagonal image striping.
    data = data.convert("RGBA", palette=Image.Palette.ADAPTIVE, colors=16)
    # Init canvas config
    config = CanvasConfig()
    # Set output mode
    config.pixel_mode = str_to_pixel_mode[output_format]
    # Configure the canvas geometry based on our cell size
    if hasattr(app := get_app(), "term_info"):
        px, py = app.term_info.cell_size_px
    else:
        # Fallback cell size when no terminal info is available.
        px, py = 10, 20
    config.cell_width, config.cell_height = px, py
    # Set canvas height and width
    # NOTE(review): indentation was lost in this fragment; the rows-check is
    # reconstructed as nested under ``if cols:`` because the else-branch
    # divides by ``cols`` — confirm against the original module.
    if cols:
        config.width = cols
        if rows:
            config.height = max(1, rows)
        # If we don't have rows specified, derive the height from the
        # image's aspect ratio.
        else:
            config.height = max(1, int(cols / data.size[0] * data.size[1] * px / py))
    # Set the foreground color
    if fg and (color := fg.lstrip("#")):
        config.fg_color = (
            int(color[0:2], 16),
            int(color[2:4], 16),
            int(color[4:6], 16),
        )
    # Set the background color
    if bg and (color := bg.lstrip("#")):
        config.bg_color = (
            int(color[0:2], 16),
            int(color[2:4], 16),
            int(color[4:6], 16),
        )
    # Init the canvas
    canvas = Canvas(config)
    # Draw to canvas
    canvas.draw_all_pixels(
        pil_mode_to_pixel_type.get(data.mode, PixelType.CHAFA_PIXEL_RGBA8_UNASSOCIATED),
        list(data.tobytes()),
        width := data.width,
        data.height,
        # Row stride in bytes: pixels per row times bytes per pixel.
        width * len(data.getbands()),
    )
    # Return the output
    return canvas.print(fallback=True).decode()
6,830 | from __future__ import annotations
import asyncio
import hashlib
import inspect
import io
import logging
import threading
from typing import TYPE_CHECKING, Generic, TypeVar
from weakref import ReferenceType, WeakValueDictionary, finalize, ref
import imagesize
from PIL.Image import Image as PilImage
from prompt_toolkit.data_structures import Size
from prompt_toolkit.layout.containers import WindowAlign
from euporie.core.convert.registry import (
_CONVERTOR_ROUTE_CACHE,
_FILTER_CACHE,
converters,
)
from euporie.core.current import get_app
from euporie.core.ft.utils import to_plain_text
_IO_THREAD: list[threading.Thread | None] = [None]
_LOOP: list[asyncio.AbstractEventLoop | None] = [
None
]
The provided code snippet includes necessary dependencies for implementing the `get_loop` function. Write a Python function `def get_loop() -> asyncio.AbstractEventLoop` to solve the following problem:
Create or return the conversion IO loop. The loop will be running on a separate thread.
Here is the function:
def get_loop() -> asyncio.AbstractEventLoop:
    """Create or return the conversion IO loop.

    The loop will be running on a separate thread.

    Raises:
        NotImplementedError: If called from the conversion loop itself, since
            blocking on it from within would deadlock.
    """
    loop = _LOOP[0]
    if loop is None:
        # First use: spin up the loop on a daemon thread so it never blocks
        # interpreter shutdown.
        loop = asyncio.new_event_loop()
        _LOOP[0] = loop
        io_thread = threading.Thread(
            target=loop.run_forever, name="EuporieConvertIO", daemon=True
        )
        io_thread.start()
        _IO_THREAD[0] = io_thread
    # Check we are not already in the conversion event loop
    try:
        current = asyncio.get_running_loop()
    except RuntimeError:
        current = None
    if loop is current:
        raise NotImplementedError(
            "Cannot call `convert` from the conversion event loop"
        )
    return loop
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.